| code | docstring | source |
|---|---|---|
def load_template(path_or_buffer):
    """Build tabula-py options from a Tabula app template file.

    Args:
        path_or_buffer: Path or file-like object of a Tabula app template.

    Returns:
        list of dict: tabula-py option dicts, one per (page, extraction
        method) group found in the template.
    """
    from itertools import groupby
    from operator import itemgetter

    path_or_buffer = _stringify_path(path_or_buffer)
    if is_file_like(path_or_buffer):
        templates = json.load(path_or_buffer)
    else:
        with open(path_or_buffer, 'r') as f:
            templates = json.load(f)

    options = []
    grouper = itemgetter('page', 'extraction_method')
    for _, group in groupby(sorted(templates, key=grouper), grouper):
        converted = [_convert_template_option(entry) for entry in group]
        if len(converted) == 1:
            options.append(converted[0])
            continue
        # Several template entries on the same page with the same extraction
        # method: merge them into a single multi-table option.
        merged = converted[0]
        merged['area'] = [entry.get('area') for entry in converted]
        merged['multiple_tables'] = True
        options.append(merged)
    return options
def set_json(self, reason='', new_page=False):
    """Send the JSON from the cache to the usernotes wiki page.

    Args:
        reason (str): the change reason that will be posted to the wiki
            changelog.
        new_page (bool): if True, create the wiki page before writing and
            restrict it to moderators.

    Raises:
        OverflowError: if the new JSON data is greater than max_page_size.
    """
    payload = json.dumps(self._compress_json(self.cached_json))
    if len(payload) > self.max_page_size:
        raise OverflowError(
            'Usernotes page is too large (>{0} characters)'.
            format(self.max_page_size)
        )
    if not new_page:
        self.subreddit.wiki[self.page_name].edit(payload, reason)
        return
    self.subreddit.wiki.create(self.page_name, payload, reason)
    # Restrict the freshly created page so only moderators can access it.
    self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)
def AddForwardLoopCounter(self, outer_grad_state):
    """Adds a loop that counts the number of iterations.

    This is added to the forward loop at the time when we start to create
    the loop for the backprop gradient computation. Called in the outer
    context of this forward context.

    The pseudocode is: `n = 0; while (_pivot) { n++; }`

    Note that a control dependency is added to the counter's initial value
    to ensure the correct execution order of stack push ops.

    Args:
        outer_grad_state: The outer grad state. None if not nested.

    Returns:
        The number of iterations taken by the forward loop and the loop
        index.
    """
    count_init = constant_op.constant(0, name='f_count')
    if outer_grad_state is not None:
        # Nested loop: order this counter after the outer forward index
        # increment so stack pushes happen in the right order.
        outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
        count_init.op._add_control_input(outer_add_op)
    self.Enter()
    self.AddName(count_init.name)
    enter_count = _Enter(count_init, self._name, is_constant=False,
                         parallel_iterations=self._parallel_iterations,
                         name='f_count')
    self.loop_enters.append(enter_count)
    # Standard while-loop skeleton: Merge -> Switch -> body -> NextIteration.
    merge_count = merge([enter_count, enter_count])[0]
    switch_count = switch(merge_count, self._pivot)
    index = math_ops.add(switch_count[1], 1)
    next_count = _NextIteration(index)
    # Patch the placeholder second Merge input with the real back edge.
    merge_count.op._update_input(1, next_count)
    total_iterations = exit(switch_count[0], name='f_count')
    self.loop_exits.append(total_iterations)
    self.ExitResult([total_iterations])
    self.Exit()
    return (total_iterations, next_count)
def AddStorageMediaImageOptions(self, argument_group):
    """Adds the storage media image options to the argument group.

    Args:
        argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    partitions_help = (
        'Define partitions to be processed. A range of '
        'partitions can be defined as: "3..5". Multiple partitions can '
        'be defined as: "1,3,5" (a list of comma separated values). '
        'Ranges and lists can also be combined as: "1,3..5". The first '
        'partition is 1. All partitions can be specified with: "all".')
    argument_group.add_argument(
        '--partitions', '--partition', dest='partitions', action='store',
        type=str, default=None, help=partitions_help)

    volumes_help = (
        'Define volumes to be processed. A range of volumes can be defined '
        'as: "3..5". Multiple volumes can be defined as: "1,3,5" (a list '
        'of comma separated values). Ranges and lists can also be combined '
        'as: "1,3..5". The first volume is 1. All volumes can be specified '
        'with: "all".')
    argument_group.add_argument(
        '--volumes', '--volume', dest='volumes', action='store', type=str,
        default=None, help=volumes_help)
def listSubjects(self, query, status=None, start=None, count=None, vendorSpecific=None):
    """List subjects known to the node. See Also: listSubjectsResponse().

    Args:
        query: Query string to match subjects against.
        status: Optional status filter.
        start: Optional zero-based slice start index.
        count: Optional maximum number of results.
        vendorSpecific: Optional dict of vendor specific extensions.

    Returns:
        The response deserialized as a DataONE ``SubjectInfo`` type.
    """
    response = self.listSubjectsResponse(query, status, start, count, vendorSpecific)
    return self._read_dataone_type_response(response, 'SubjectInfo')
def add_keyword(self, keyword, schema=None, source=None):
    """Add a keyword.

    Args:
        keyword (str): keyword to add.
        schema (str): schema to which the keyword belongs.
        source (str): source for the keyword.
    """
    entry = self._sourced_dict(source, value=keyword)
    if schema is not None:
        entry['schema'] = schema
    self._append_to('keywords', entry)
def validate(self, x: symbolic.Symbolic) -> None:
    """Validates an input's integrity.

    This method will be called in :func:`pyglove.patch` when a chain of
    patchers have been applied, as to validate that the patched object in
    chain still conforms to the patcher's plan.

    Args:
        x: The input after modification.
    """
    # No validator configured means validation is a no-op.
    if self._validator is None:
        return
    self._validator(x)
def __init__(self, *args, **kwargs):
    """Create an instance.

    Args:
        *args: Positional arguments forwarded to the base transaction.
        **kwargs: Keyword arguments forwarded to the base transaction.
    """
    super(EnrollmentTransaction, self).__init__(*args, **kwargs)
    # Tag the transaction with its concrete type after base initialization.
    self.Type = TransactionType.EnrollmentTransaction
def try_pick_piece_of_work(self, worker_id, submission_id=None):
    """Tries to pick the next unclaimed piece of work to do.

    The attempt to claim a work piece is done using a Cloud Datastore
    transaction, so only one worker can claim any work piece at a time.

    Args:
        worker_id: ID of the current worker.
        submission_id: if not None then this method will try to pick a
            piece of work for this submission only.

    Returns:
        ID of the claimed work piece, or None if nothing could be claimed.
    """
    client = self._datastore_client
    candidates = None
    if submission_id:
        # Prefer unclaimed work for the requested submission.
        candidates = [
            work_id for work_id, work in iteritems(self.work)
            if is_unclaimed(work) and work['submission_id'] == submission_id
        ]
    if not candidates:
        # Fall back to any unclaimed work.
        candidates = [work_id for work_id, work in iteritems(self.work)
                      if is_unclaimed(work)]
    if not candidates:
        return None
    next_work_id = random.choice(candidates)
    try:
        with client.transaction() as transaction:
            work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
                                  KIND_WORK, next_work_id)
            work_entity = client.get(work_key, transaction=transaction)
            # Another worker may have claimed it since we sampled above.
            if not is_unclaimed(work_entity):
                return None
            work_entity['claimed_worker_id'] = worker_id
            work_entity['claimed_worker_start_time'] = get_integer_time()
            transaction.put(work_entity)
    except Exception:
        # Transaction conflicts are expected under contention; treat any
        # failure as "could not claim" rather than crashing the worker.
        return None
    return next_work_id
def GetConsensusAddress(validators):
    """Get the script hash of the consensus node.

    Args:
        validators (list): of EllipticCurve.ECPoint's.

    Returns:
        UInt160: script hash of the multi-signature redeem script.
    """
    count = len(validators)
    # Byzantine fault tolerance threshold: n - floor((n - 1) / 3) signatures.
    threshold = count - int((count - 1) / 3)
    script = Contract.CreateMultiSigRedeemScript(threshold, validators)
    return Crypto.ToScriptHash(script)
def do_operation_update(self, info, an_op):
    """Updates an operation using the assigned update_op_func.

    Args:
        info: the info instance to update.
        an_op: the operation instance to update.
    """
    update = self.update_op_func
    update(self.metric_name, info, an_op)
def goto(self, iroute: 'InstanceRoute') -> 'InstanceNode':
    """Move the focus to an instance inside the receiver's value.

    Args:
        iroute: Instance route (relative to the receiver).

    Returns:
        The instance node corresponding to the target instance.

    Raises:
        InstanceValueError: If `iroute` is incompatible with the receiver's
            value.
        NonexistentInstance: If the instance node doesn't exist.
        NonDataNode: If an instance route addresses a non-data node
            (rpc/action/notification).
    """
    node = self
    for selector in iroute:
        node = selector.goto_step(node)
    return node
def log_cert_info(logger, msg_str, cert_obj):
    """Dump basic certificate values to the log.

    Args:
        logger: Logger callable to which each certificate value line is
            written.
        msg_str (str): A message written to the log before the values.
        cert_obj (cryptography.Certificate): Certificate containing values
            to log.

    Returns:
        None
    """
    details = [
        'Subject: {}'.format(_get_val_str(cert_obj, ['subject', 'value'], reverse=True)),
        'Issuer: {}'.format(_get_val_str(cert_obj, ['issuer', 'value'], reverse=True)),
        'Not Valid Before: {}'.format(cert_obj.not_valid_before.isoformat()),
        'Not Valid After: {}'.format(cert_obj.not_valid_after.isoformat()),
        'Subject Alt Names: {}'.format(
            _get_ext_val_str(cert_obj, 'SUBJECT_ALTERNATIVE_NAME', ['value', 'value'])),
        'CRL Distribution Points: {}'.format(
            _get_ext_val_str(cert_obj, 'CRL_DISTRIBUTION_POINTS',
                             ['value', 'full_name', 'value', 'value'])),
        'Authority Access Location: {}'.format(
            extract_issuer_ca_cert_url(cert_obj) or '<not found>'),
    ]
    for line in ['{}:'.format(msg_str)] + [' {}'.format(d) for d in details]:
        logger(line)
def reflection(n1, n2):
    """Calculate the power reflection at the interface of two
    refractive-index materials.

    Args:
        n1 (float): Refractive index of material 1.
        n2 (float): Refractive index of material 2.

    Returns:
        float: The fraction of reflected power.
    """
    # Fresnel reflection at normal incidence: |r|^2 with r = (n1-n2)/(n1+n2).
    amplitude = (n1 - n2) / (n1 + n2)
    return abs(amplitude) ** 2
def update_mp_firware_version(self, timeout=-1):
    """Updates the iLO firmware on a physical server to the minimum iLO
    firmware version required by OneView to manage the server.

    Args:
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        Resource
    """
    # NOTE: "firware" typo is part of the public API; renaming would break
    # callers.
    uri = '{}/mpFirmwareVersion'.format(self.data['uri'])
    return self._helper.do_put(uri, None, timeout, None)
def get_class(schema_name):
    """Retrieve the message class associated with the schema name.

    If no match is found, the default schema is returned and a warning is
    logged.

    Args:
        schema_name (six.text_type): The name of the :class:`Message`
            sub-class; this is typically the Python path.

    Returns:
        Message: A sub-class of :class:`Message` to create the message from.
    """
    global _registry_loaded
    if not _registry_loaded:
        # Lazily populate the registry on first lookup.
        load_message_classes()

    if schema_name in _schema_name_to_class:
        return _schema_name_to_class[schema_name]
    _log.warning(
        'The schema "%s" is not in the schema registry! Either install '
        "the package with its schema definition or define a schema. "
        "Falling back to the default schema...",
        schema_name,
    )
    return Message
def diagonalize_real_symmetric_matrix(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> np.ndarray:
    """Returns an orthogonal matrix that diagonalizes the given matrix.

    Args:
        matrix: A real symmetric matrix to diagonalize.
        rtol: Relative tolerance for the symmetry check.
        atol: Absolute tolerance for the symmetry check.

    Returns:
        An orthogonal matrix P such that P.T @ matrix @ P is diagonal.

    Raises:
        ValueError: Matrix isn't real symmetric.
    """
    # BUG FIX: rtol/atol were accepted but never used; forward them to the
    # hermiticity check so callers can actually control the tolerance.
    if np.any(np.imag(matrix) != 0) or not predicates.is_hermitian(matrix, rtol=rtol, atol=atol):
        raise ValueError('Input must be real and symmetric.')
    # eigh returns eigenvectors as columns of an orthogonal matrix for a
    # real symmetric input; the eigenvalues themselves are not needed here.
    _, result = np.linalg.eigh(matrix)
    return result
def includes(self, lo_freq: float) -> bool:
    """Whether `lo_freq` is within the `LoRange`.

    Args:
        lo_freq: LO frequency to be checked.

    Returns:
        bool: True if lo_freq is included in this range, otherwise False.
    """
    return self._lb <= lo_freq <= self._ub
def split_leading_dim(tensor, inputs, n_dims=2):
    """Split the first dimension of a tensor.

    Args:
        tensor: Tensor to have its first dimension split.
        inputs: Original reference input to look the dimensions of.
        n_dims: Number of dimensions to split.

    Returns:
        The input tensor, with its first dimension split.
    """
    input_shape_static = inputs.get_shape()
    input_shape_list = input_shape_static.as_list()
    tensor_shape_static = tensor.get_shape()
    tensor_shape_list = tensor_shape_static.as_list()
    if (input_shape_static.is_fully_defined()
            and tensor_shape_static.is_fully_defined()):
        # Fast path: every dimension is known at graph-construction time,
        # so the target shape can be computed statically.
        new_shape = input_shape_list[:n_dims] + tensor_shape_list[1:]
        return tf.reshape(tensor, new_shape)
    # Otherwise assemble the target shape dynamically at run time ...
    dims_after_first = tf.shape(tensor)[1:]
    split_sizes = tf.shape(inputs)[:n_dims]
    output_size = tf.concat([split_sizes, dims_after_first], 0)
    result = tf.reshape(tensor, output_size)
    # ... and re-attach whatever static shape information is available.
    known_split_sizes = input_shape_list[:n_dims]
    known_dims_after_first = tensor_shape_list[1:]
    result.set_shape(known_split_sizes + known_dims_after_first)
    return result
def _get_events_list(object_key: str) -> List[str]:
    """Get the list of event ids for the object with the specified key.

    Args:
        object_key (str): Key of an object in the database.

    Returns:
        List[str]: Event ids stored under the object's events-list key.
    """
    events_key = _keys.events_list(object_key)
    return DB.get_list(events_key)
def get_worfklow_spec(self):
    """Generates and caches the workflow spec package from BPMN diagrams.

    Falls back to the special ``not_found`` workflow when the requested
    workflow does not exist, recording the missing name in ``task_data``.

    Returns:
        SpiffWorkflow Spec object.
    """
    # NOTE: "worfklow" typo is part of the public API; renaming would break
    # callers.
    name = self.current.workflow_name
    if name not in self.workflow_spec_cache:
        try:
            self.current.wf_object = BPMNWorkflow.objects.get(name=name)
        except ObjectDoesNotExist:
            # Unknown workflow: serve the "not_found" workflow instead and
            # remember which name was originally requested.
            self.current.wf_object = BPMNWorkflow.objects.get(name='not_found')
            self.current.task_data['non-existent-wf'] = name
            self.current.workflow_name = 'not_found'
            name = 'not_found'
        xml_content = self.current.wf_object.xml.body
        spec = ZopsSerializer().deserialize_workflow_spec(xml_content, name)
        spec.wf_id = self.current.wf_object.key
        self.workflow_spec_cache[name] = spec
    return self.workflow_spec_cache[name]
def add_string_pairs_from_text_view_element(xib_file, results, text_view, special_ui_components_prefix):
    """Adds string pairs from a textview element.

    Args:
        xib_file (str): Path to the xib file.
        results (list): The list to add the results to.
        text_view (element): The textview element from the xib, to extract
            the string pairs from.
        special_ui_components_prefix (str): A custom prefix for
            internationalized components to allow.
    """
    comment = extract_element_internationalized_comment(text_view)
    if comment is None:
        # Element is not marked for internationalization; nothing to add.
        return

    uses_attributed = (text_view.hasAttribute('usesAttributedText')
                       and text_view.attributes['usesAttributedText'].value == 'YES')
    if uses_attributed:
        add_string_pairs_from_attributed_ui_element(results, text_view, comment)
    else:
        try:
            key = text_view.attributes['text'].value
        except KeyError:
            # No default text attribute on this textview; skip it.
            pass
        else:
            results.append((key, comment + ' default text value'))
    warn_if_element_not_of_class(text_view, 'TextView', special_ui_components_prefix)
def create_parser():
    """Creates a parser that parses the command line arguments.

    Returns:
        A namespace parsed from command line arguments.
    """
    parser = argparse_flags.ArgumentParser(
        description='saved_model_cli: Command-line interface for SavedModel',
        conflict_handler='resolve')
    parser.add_argument('-v', '--version', action='version', version='0.1.0')
    subparsers = parser.add_subparsers(
        title='commands', description='valid commands', help='additional help')
    # Register every sub-command on the shared subparsers object.
    for add_subparser in (add_show_subparser, add_run_subparser,
                          add_scan_subparser, add_convert_subparser,
                          add_aot_compile_cpu_subparser,
                          add_freeze_model_subparser):
        add_subparser(subparsers)
    return parser
def _HasExpired(self, key):
self.logger.debug('Processing key: %s.', key)
try:
(schema, json_str) = key.split(None, 3)[2:]
except (ValueError, AttributeError):
self.logger.debug('No schema identifier. Not expiring key.')
return False
if (schema != 'google-ssh'):
self.logger.debug('Invalid schema %s. Not expiring key.', schema)
return False
try:
json_obj = json.loads(json_str)
except ValueError:
self.logger.debug('Invalid JSON %s. Not expiring key.', json_str)
return False
if ('expireOn' not in json_obj):
self.logger.debug('No expiration timestamp. Not expiring key.')
return False
expire_str = json_obj['expireOn']
format_str = '%Y-%m-%dT%H:%M:%S+0000'
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
self.logger.warning('Expiration timestamp "%s" not in format %s. Not expiring key.', expire_str, format_str)
return False
return (datetime.datetime.utcnow() > expire_time) | Check whether an SSH key has expired.
Uses Google-specific semantics of the OpenSSH public key format's comment
field to determine if an SSH key is past its expiration timestamp, and
therefore no longer to be trusted. This format is still subject to change.
Reliance on it in any way is at your own risk.
Args:
key: string, a single public key entry in OpenSSH public key file format.
This will be checked for Google-specific comment semantics, and if
present, those will be analysed.
Returns:
bool, True if the key has Google-specific comment semantics and has an
expiration timestamp in the past, or False otherwise. | codesearchnet |
def _parse_networks(service_list: dict) -> list:
networks = []
for n_values in service_list['networks'].values():
for n_key, n_value in n_values.items():
if 'name' in n_key:
networks.append(n_value)
return networks | Parse network key.
Args:
service_list (dict): Service configurations
Returns:
list, List of networks | juraj-google-style |
def _get_edge_sentences(G: AnalysisGraph, source: str, target: str) -> List[str]:
    """Return the sentences that led to the construction of a specified edge.

    Args:
        G: The analysis graph holding the edge.
        source: The source of the edge.
        target: The target of the edge.

    Returns:
        An iterable over the repr() of each evidence sentence attached to
        the edge's influence statements.
    """
    statements = G.edges[source, target]["InfluenceStatements"]
    return chain.from_iterable(
        [[repr(evidence.text) for evidence in statement.evidence]
         for statement in statements]
    )
def calc_transition_to_state(self, newstate):
    """Generate the sequence of transitions that would move this state
    machine instance to the target state.

    Args:
        newstate: A str state name to calculate the path to.

    Returns:
        A bitarray containing the bits that would transition this state
        machine to the target state. The bits read from right to left. For
        efficiency, the resulting bitarray is cached. Do not edit this
        bitarray, or it will cause undefined behavior.

    Raises:
        ValueError: If `newstate` is not a valid state, or no path to it
            exists.
    """
    cached = JTAGStateMachine._lookup_cache.get((self.state, newstate))
    if cached:
        return cached

    if newstate not in self.states:
        raise ValueError('%s is not a valid state for this state machine' % newstate)
    path = self._find_shortest_path(self._statestr, newstate)
    if not path:
        raise ValueError('No path to the requested state.')
    steps = self._get_steps_from_nodes_path(path)
    # Bits are consumed right-to-left by the caller.
    steps.reverse()
    JTAGStateMachine._lookup_cache[(self.state, newstate)] = steps
    return steps
def gates_to_uncompute(self):
    """Create a circuit with gates that take the desired vector to zero.

    Returns:
        QuantumCircuit: circuit to take ``self.params`` vector to
        :math:`|00..0\\rangle`.
    """
    qubits = QuantumRegister(self.num_qubits)
    circuit = QuantumCircuit(qubits, name='disentangler')
    remaining_param = self.params
    for qubit_index in range(self.num_qubits):
        # Peel off one qubit per iteration with multiplexed RZ/RY rotations
        # acting on the still-entangled remainder.
        remaining_param, thetas, phis = Initialize._rotations_to_disentangle(remaining_param)
        rz_mult = self._multiplex(RZGate, phis)
        ry_mult = self._multiplex(RYGate, thetas)
        circuit.append(rz_mult.to_instruction(), qubits[qubit_index:self.num_qubits])
        circuit.append(ry_mult.to_instruction(), qubits[qubit_index:self.num_qubits])
    return circuit
def setSingleStep(self, singleStep):
    """Setter for _singleStep; converts negative values to positive ones.

    Args:
        singleStep (int): new _singleStep value. Negative values are
            converted to positive ones.

    Raises:
        TypeError: If the given argument is not an integer.

    Returns:
        int: the absolute value of the given argument.
    """
    if not isinstance(singleStep, int):
        raise TypeError("Argument is not of type int")
    value = abs(singleStep)
    self._singleStep = value
    return value
def put(self, block_id, priority, pb_type='offline'):
    """Add a Processing Block to the queue.

    When a new entry is added, the queue is (re-)sorted by priority
    followed by insertion order (older blocks with equal priority come
    first).

    Args:
        block_id (str): Processing Block Identifier.
        priority (int): Processing Block scheduling priority
            (higher values = higher priority).
        pb_type (str): Processing Block type (offline, realtime).

    Raises:
        ValueError: If `pb_type` is not a recognised type.
        KeyError: If `block_id` is already queued.
    """
    if pb_type not in ('offline', 'realtime'):
        raise ValueError('Invalid PB type.')
    with self._mutex:
        added_time = datetime.datetime.utcnow().isoformat()
        # sys.maxsize - index makes older entries sort higher among equal
        # priorities once the queue is reversed below.
        entry = (priority, sys.maxsize - self._index, block_id, pb_type,
                 added_time)
        self._index += 1
        if self._block_map.get(block_id) is not None:
            raise KeyError('ERROR: Block id "{}" already exists in PC PB queue!'.format(block_id))
        self._block_map[block_id] = entry
        LOG.debug('Adding PB %s to queue', block_id)
        self._queue.append(entry)
        # Highest priority first; ties broken by insertion order.
        self._queue.sort()
        self._queue.reverse()
def rental_report(self, address, zipcode, format_type="json"):
    """Call the rental_report component.

    Rental Report only supports a single address.

    Args:
        address: Street address to report on.
        zipcode: ZIP code of the address.
        format_type: "json", "xlsx" or "all". Default is "json".
    """
    query_params = {
        'format': format_type,
        'address': address,
        'zipcode': zipcode,
    }
    return self._api_client.fetch_synchronous('property/rental_report', query_params)
def get_user_info(self, token):
    """Retrieves the user info from the OAuth provider.

    Arguments:
        token (str): OAuth2 access token.

    Returns:
        dict

    Raises:
        UserInfoRetrievalFailed: Retrieval of user info from the remote
            server failed.
    """
    url = self.get_user_info_url()
    try:
        headers = {'Authorization': 'Bearer {}'.format(token)}
        response = requests.get(url, headers=headers)
    except requests.RequestException:
        logger.exception('Failed to retrieve user info due to a request exception.')
        raise UserInfoRetrievalFailed

    if response.status_code != 200:
        msg = 'Failed to retrieve user info. Server [{server}] responded with status [{status}].'.format(
            server=url, status=response.status_code)
        raise UserInfoRetrievalFailed(msg)
    return self.process_user_info_response(response.json())
def get_key_counter_alg(seed, alg):
    """Calculates the key, counter and algorithm to pass to raw RNG ops.

    This function calculates the key and counter, and determines the
    algorithm that will be passed to the raw RNG ops like
    `StatelessRandomUniformV2`. Depending on the input `alg`, the key and
    counter may be scrambled or copied from `seed`. If `alg` is
    `"auto_select"`, the key and counter will be determined at runtime
    based on device type.

    Args:
        seed: An integer tensor of shape [2]. The seed to calculate the key
            and counter from.
        alg: The RNG algorithm. See `tf.random.stateless_uniform` for an
            explanation.

    Returns:
        A tuple (key, counter, algorithm) suitable for V2 stateless RNG ops
        like `StatelessRandomUniformV2`.
    """
    if alg is None:
        alg = Algorithm.AUTO_SELECT.value
    alg = convert_alg_to_int(alg)
    rng_key, rng_counter = _get_key_counter(seed, alg)
    return (rng_key, rng_counter, alg)
def console_set_char_foreground(con: tcod.console.Console, x: int, y: int, col: Tuple[(int, int, int)]) -> None:
    """Change the foreground color of x,y to col.

    Args:
        con (Console): Any Console instance.
        x (int): Character x position from the left.
        y (int): Character y position from the top.
        col (Union[Tuple[int, int, int], Sequence[int]]): An (r, g, b)
            sequence or Color instance.

    .. deprecated:: 8.4
        Array access performs significantly faster than using this
        function. See :any:`Console.fg`.
    """
    # Resolve the Python wrapper to the underlying C console, then delegate.
    c_console = _console(con)
    lib.TCOD_console_set_char_foreground(c_console, x, y, col)
def append(self, text, afterline=None):
    """Append text to the current buffer.

    Args:
        text (str or Sequence[str]): One or many lines of text to append.
        afterline (Optional[int]): Line number to append after. If 0, text
            is prepended before the first line; if ``None``, at the end of
            the buffer.
    """
    # BUG FIX: the original used `if afterline:`, so the documented
    # afterline=0 "prepend before the first line" case was treated as
    # falsy and silently appended at the end of the buffer instead.
    if afterline is not None:
        self._vim.current.buffer.append(text, afterline)
    else:
        self._vim.current.buffer.append(text)
def import_project(self, file, path, namespace=None, overwrite=False, override_params=None, **kwargs):
    """Import a project from an archive file.

    Args:
        file: Data or file object containing the project.
        path (str): Name and path for the new project.
        namespace (str): The ID or path of the namespace that the project
            will be imported to.
        overwrite (bool): If True overwrite an existing project with the
            same path.
        override_params (dict): Set the specific settings for the project.
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabListError: If the server failed to perform the request.

    Returns:
        dict: A representation of the import status.
    """
    files = {'file': ('file.tar.gz', file)}
    data = {'path': path, 'overwrite': overwrite}
    if override_params:
        # Project settings are sent as override_params[<name>] form fields.
        data.update(('override_params[%s]' % key, value)
                    for key, value in override_params.items())
    if namespace:
        data['namespace'] = namespace
    return self.gitlab.http_post('/projects/import', post_data=data, files=files, **kwargs)
def create_from_json(cls, json_data):
    """Deserialize property json data into a Property object.

    Args:
        json_data (dict): The json data for this property.

    Returns:
        Property object.
    """
    prop = Property()
    address_info = json_data['address_info']
    # Copy the flat address fields straight across.
    for attr in ('address', 'block_id', 'zipcode', 'zipcode_plus4',
                 'address_full', 'city', 'county_fips', 'geo_precision',
                 'lat', 'lng', 'slug', 'state', 'unit'):
        setattr(prop, attr, address_info[attr])
    # 'meta' is optional; default to None when absent.
    prop.meta = json_data.get('meta')
    prop.component_results = _create_component_results(json_data, 'address_info')
    return prop
def object_upload(self, bucket, key, content, content_type):
    """Writes text content to the object.

    Args:
        bucket: the name of the bucket containing the object.
        key: the key of the object to be written.
        content: the text content to be written.
        content_type: the type of text content.

    Raises:
        Exception if the object could not be written to.
    """
    args = {'uploadType': 'media', 'name': key}
    headers = {'Content-Type': content_type}
    # Media uploads go through the upload endpoint with an empty key in the
    # path; the object name is carried in the query args instead.
    url = Api._UPLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
    return google.datalab.utils.Http.request(
        url, args=args, data=content, headers=headers,
        credentials=self._credentials, raw_response=True)
def set_aliases_and_defaults(self, aliases_config=None, default_properties=None):
    """Set the alias config and defaults to use. Typically used when
    switching to a collection with a different schema.

    Args:
        aliases_config: An alias dict to use. Defaults to None, which means
            the default aliases defined in "aliases.json" are used. See the
            constructor for the format.
        default_properties: List of property names (strings) to use by
            default, if no properties are given to the 'properties'
            argument of query().
    """
    if aliases_config is None:
        # Fall back to the aliases shipped alongside this module.
        config_path = os.path.join(os.path.dirname(__file__), 'aliases.json')
        with open(config_path) as f:
            aliases_config = json.load(f)
    self.aliases = aliases_config.get('aliases', {})
    self.default_criteria = aliases_config.get('defaults', {})
    if default_properties is None:
        self._default_props, self._default_prop_dict = None, None
    else:
        self._default_props, self._default_prop_dict = self._parse_properties(default_properties)
def run(self, verbose=True):
    """Run the analysis.

    Generate data from each provider, then check these data with every
    checker, and store the analysis results.

    Args:
        verbose (bool): whether to immediately print the results or not.
    """
    self.results.clear()
    for analysis_group in self.config.analysis_groups:
        if not analysis_group.providers:
            # No data providers: record "no-data" results for each checker.
            for checker in analysis_group.checkers:
                result = self._get_checker_result(
                    analysis_group, checker, nd='no-data-')
                self.results.append(result)
                analysis_group.results.append(result)
                if verbose:
                    result.print()
            continue
        for provider in analysis_group.providers:
            logger.info('Run provider %s', provider.identifier)
            provider.run()
            for checker in analysis_group.checkers:
                result = self._get_checker_result(
                    analysis_group, checker, provider)
                self.results.append(result)
                analysis_group.results.append(result)
                if verbose:
                    result.print()
def _get_transformers(self):
transformer_dict = {}
for table in self.metadata['tables']:
table_name = table['name']
for field in table['fields']:
transformer_type = field.get('type')
if transformer_type:
col_name = field['name']
transformer_dict[(table_name, col_name)] = transformer_type
return transformer_dict | Load the contents of meta_file and extract information about the transformers.
Returns:
dict: tuple(str, str) -> Transformer. | codesearchnet |
def order(self, image_catalog_ids, batch_size=100, callback=None):
    """Orders images from GBDX.

    Args:
        image_catalog_ids (str or list): A single catalog id or a list of
            catalog ids.
        batch_size (int): The image_catalog_ids will be split into batches
            of batch_size. The ordering API max batch size is 100; if
            batch_size is greater than 100 it will be truncated.
        callback (str): A url to call when ordering is completed.

    Returns:
        order_ids (str or list): If one batch, returns a string. If more
        than one batch, returns a list of order ids, one for each batch.
    """
    def _order_single_batch(batch_url, ids, results_list):
        if callback is None:
            payload = json.dumps(ids)
        else:
            payload = json.dumps({'acquisitionIds': ids, 'callback': callback})
        response = self.gbdx_connection.post(batch_url, data=payload)
        response.raise_for_status()
        order_id = response.json().get('order_id')
        if order_id:
            results_list.append(order_id)

    self.logger.debug('Place order')
    url = ('%s/order' if callback is None else '%s/ordercb') % self.base_url
    # The ordering API caps batches at 100 ids.
    batch_size = min(100, batch_size)
    if not isinstance(image_catalog_ids, list):
        image_catalog_ids = [image_catalog_ids]
    # Strip whitespace, drop empties and de-duplicate.
    sanitized_ids = list({cid for cid in (raw.strip() for raw in image_catalog_ids) if cid})

    res = []
    # Full batches first; zip(*[iter]*n) silently drops the remainder ...
    for ids_batch in zip(*([iter(sanitized_ids)] * batch_size)):
        _order_single_batch(url, ids_batch, res)
    # ... which is ordered separately here.
    remain_count = len(sanitized_ids) % batch_size
    if remain_count > 0:
        _order_single_batch(url, sanitized_ids[-remain_count:], res)

    if len(res) == 1:
        return res[0]
    elif len(res) > 1:
        return res
def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads):
    """Apply averaged gradients to ps vars, and then copy the updated
    variables back to each tower.

    Args:
        opt: the optimizer used to apply gradients.
        raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers.
        ps_var_grads: Nvar x 2 (grad, ps_var).

    Returns:
        list of copy ops.
    """
    with tf.name_scope('apply_gradients'):
        var_update_ops = []
        for vid, (grad, ps_var) in enumerate(ps_var_grads):
            apply_op = opt.apply_gradients([(grad, ps_var)])
            # All towers must finish applying this variable's gradient
            # before any of them reads the updated value back.
            barrier = self._add_sync_queues_and_barrier(
                'param_update_barrier_{}'.format(vid), [apply_op])
            with tf.control_dependencies([barrier]), tf.device(self.cpu_device):
                updated_value = ps_var.read_value()
                for tower in range(self.nr_gpu):
                    var_update_ops.append(
                        raw_grad_list[tower][vid][1].assign(updated_value))
        return var_update_ops
def call(self, inputs, training=None, mask=None):
    """Calls the model on new inputs.

    In this case `call` just reapplies all ops in the graph to the new
    inputs (e.g. build a new computational graph from the provided inputs).

    Args:
        inputs: A tensor or list of tensors.
        training: Boolean or boolean scalar tensor, indicating whether to
            run the `Network` in training mode or inference mode.
        mask: A mask or list of masks. A mask can be either a tensor or
            None (no mask).

    Returns:
        A tensor if there is a single output, or a list of tensors if there
        are more than one outputs.
    """
    outputs = self._run_internal_graph(inputs, training=training, mask=mask)
    return outputs
def checkUser(self, user):
    """Query a username or email address to see if a corresponding
    Microsoft account exists.

    Args:
        user (str): username or email address of an account.

    Returns:
        bool: whether the account exists.
    """
    resp = self.conn('POST', '{0}/GetCredentialType.srf'.format(SkypeConnection.API_MSACC),
                     json={'username': user})
    # IfExistsResult is 0 when the account exists.
    return not resp.json().get('IfExistsResult')
def get_videos_for_course(course_id, sort_field=None, sort_dir=SortDirection.asc, pagination_conf=None):
    # Delegate to the shared filter helper; videos hidden for the course are excluded.
    return _get_videos_for_filter({'courses__course_id': six.text_type(course_id), 'courses__is_hidden': False}, sort_field, sort_dir, pagination_conf) | Returns an iterator of videos for the given course id.
Args:
course_id (String)
sort_field (VideoSortField)
sort_dir (SortDirection)
Returns:
A generator expression that contains the videos found, sorted by the
given field and direction, with ties broken by edx_video_id to ensure a
total order. | codesearchnet |
def get(cls):
    # Two-conspect mode: read the value straight from the sub-conspect element.
    if cls.is_twoconspect:
        return (cls.subconspect_el.value or None)
    input_value = cls.input_el.value.strip()
    if (not input_value):
        return None
    # Translate the typed sub-conspect name into its MDT code.
    mdt = conspectus.mdt_by_name.get(input_value)
    if (not mdt):
        alert(('Invalid sub-conspect `%s`!' % input_value))
        return None
    return mdt | Get code selected by user.
Returns:
str: Code or None in case that user didn't selected anything yet. | codesearchnet |
def lookup_descriptor(self, definition_name):
    # Fast path: the descriptor is already cached.
    try:
        return self.__descriptors[definition_name]
    except KeyError:
        pass
    # Fall back to the optional loader and cache its result for next time.
    if self.__descriptor_loader:
        definition = self.__descriptor_loader(definition_name)
        self.__descriptors[definition_name] = definition
        return definition
    else:
        raise messages.DefinitionNotFoundError(
            'Could not find definition for %s' % definition_name) | Lookup descriptor by name.
Get descriptor from library by name. If descriptor is not found will
attempt to find via descriptor loader if provided.
Args:
definition_name: Definition name to find.
Returns:
Descriptor that describes definition name.
Raises:
DefinitionNotFoundError if not descriptor exists for definition name. | juraj-google-style |
def get(account):
    # Resolve the argument to a base Account record first.
    account = Account.get(account)
    if not account:
        return None
    # Look up the plugin class registered for this account's type and wrap
    # the record in it.
    acct_type = AccountType.get(account.account_type_id).account_type
    account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], acct_type)
    return account_class(account) | Returns the class object identified by `account_id`
Args:
account (`int`, `str`): Unique ID of the account to load from database
Returns:
`Account` object if found, else None | juraj-google-style |
def Sleep(self, seconds):
    # Sleep the fractional part first, then whole seconds one at a time so
    # a machine suspension interrupts at most one short sleep.
    time.sleep((seconds - int(seconds)))
    for _ in range(int(seconds)):
        time.sleep(1)
        # Enforce the memory quota on every one-second tick.
        if (self.GetMemoryUsage() > self.memory_quota):
            raise MemoryError('Exceeded memory allowance.')
        if (not self.running):
            break | Sleep a given time in 1 second intervals.
When a machine is suspended during a time.sleep(n) call for more
than n seconds, sometimes the sleep is interrupted and all threads
wake up at the same time. This leads to race conditions between
the threads issuing the heartbeat and the one checking for it. By
sleeping in small intervals, we make sure that even if one sleep
call is interrupted, we do not check for the heartbeat too early.
Args:
seconds: Number of seconds to sleep.
Raises:
MemoryError: if the process exceeds memory quota. | codesearchnet |
def guess_settings(self, major, minor):
    version = major, minor
    # vbr_method 2: ABR-style options (--abr / ABR presets).
    if self.vbr_method == 2:
        if version in ((3, 90), (3, 91), (3, 92)) and self.encoding_flags:
            if self.bitrate < 255:
                return u"--alt-preset %d" % self.bitrate
            else:
                # '+' marks bitrates at or above the 255 cap of the field.
                return u"--alt-preset %d+" % self.bitrate
        if self.preset_used != 0:
            return u"--preset %d" % self.preset_used
        elif self.bitrate < 255:
            return u"--abr %d" % self.bitrate
        else:
            return u"--abr %d+" % self.bitrate
    # vbr_method 1: fixed-bitrate (-b) options.
    elif self.vbr_method == 1:
        if self.preset_used == 0:
            if self.bitrate < 255:
                return u"-b %d" % self.bitrate
            else:
                return u"-b 255+"
        elif self.preset_used == 1003:
            return u"--preset insane"
        return u"-b %d" % self.preset_used
    # LAME 3.90-3.92: recognize the named presets by their parameter tuple.
    elif version in ((3, 90), (3, 91), (3, 92)):
        preset_key = (self.vbr_quality, self.quality, self.vbr_method,
                      self.lowpass_filter, self.ath_type)
        if preset_key == (1, 2, 4, 19500, 3):
            return u"--preset r3mix"
        if preset_key == (2, 2, 3, 19000, 4):
            return u"--alt-preset standard"
        if preset_key == (2, 2, 3, 19500, 2):
            return u"--alt-preset extreme"
        if self.vbr_method == 3:
            return u"-V %s" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            return u"-V %s --vbr-new" % self.vbr_quality
    elif version in ((3, 93), (3, 94), (3, 95), (3, 96), (3, 97)):
        # Preset ids 1001-1007 map to the named --preset variants.
        if self.preset_used == 1001:
            return u"--preset standard"
        elif self.preset_used == 1002:
            return u"--preset extreme"
        elif self.preset_used == 1004:
            return u"--preset fast standard"
        elif self.preset_used == 1005:
            return u"--preset fast extreme"
        elif self.preset_used == 1006:
            return u"--preset medium"
        elif self.preset_used == 1007:
            return u"--preset fast medium"
        if self.vbr_method == 3:
            return u"-V %s" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            return u"-V %s --vbr-new" % self.vbr_quality
    elif version == (3, 98):
        # In 3.98, vbr_method 3 maps to --vbr-old and 4/5 to plain -V.
        if self.vbr_method == 3:
            return u"-V %s --vbr-old" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            return u"-V %s" % self.vbr_quality
    elif version >= (3, 99):
        if self.vbr_method == 3:
            return u"-V %s --vbr-old" % self.vbr_quality
        elif self.vbr_method in (4, 5):
            p = self.vbr_quality
            # Special-case adjustments for certain
            # (quality, bitrate, lowpass) combinations.
            adjust_key = (p, self.bitrate, self.lowpass_filter)
            p = {
                (5, 32, 0): 7,
                (5, 8, 0): 8,
                (6, 8, 0): 9,
            }.get(adjust_key, p)
            return u"-V %s" % p
    return u"" | Gives a guess about the encoder settings used. Returns an empty
string if unknown.
The guess is mostly correct in case the file was encoded with
the default options (-V --preset --alt-preset --abr -b etc) and no
other fancy options.
Args:
major (int)
minor (int)
Returns:
text | juraj-google-style |
def get_readrows_iterator(bq_read_client: BigQueryReadClient, table_metadata: TableMetadata, columns: Iterable[str] | None=None, data_format: DataFormat=DataFormat.AVRO) -> Iterable[Mapping]:
    # Build a read session restricted to the requested columns and format.
    requested_session = ReadSession(table=table_metadata.table_path, data_format=data_format.value, read_options={'selected_fields': columns})
    # max_stream_count=1 puts all rows on a single stream, so one reader suffices.
    session = bq_read_client.create_read_session(parent=f'projects/{table_metadata.project_id}', read_session=requested_session, max_stream_count=1)
    stream_name = session.streams[0].name
    reader = bq_read_client.read_rows(stream_name)
    rows = reader.rows(session)
    return cast(Iterable[Mapping], rows) | Get an Iterator of row Mappings with the requested columns of the table,
using an authenticated BigQuery Storage API client.
Note: Does NOT support nested columns.
Args:
* bq_read_client: BigQuery Storage API Read client
* table_metadata: TableMetadata object
* columns (optional): List of columns to select
* data_format: Format to fetch data in, one of:
* DataFormat.AVRO
* DataFormat.ARROW
Defaults:
* columns: None, i.e. select all columns
* data_format: AVRO, since it auto-parses to Dict
Returns:
* Iterator of row Mappings | github-repos |
def complete(self, stream):
    # Completing an already-complete pending stream is a programming error.
    assert not self.is_complete()
    self._marker.addInputPort(outputPort=stream.oport)
    # Propagate the completing stream's schema to the pending output port.
    self.stream.oport.schema = stream.oport.schema
    self._pending_schema._set(self.stream.oport.schema)
    stream.oport.operator._start_op = True | Complete the pending stream.
Any connections made to :py:attr:`stream` are connected to `stream` once
this method returns.
Args:
stream(Stream): Stream that completes the connection. | juraj-google-style |
def add_multiple_to_queue(self, items, container=None):
    # If a container is given, pass its URI and metadata along with the items.
    if (container is not None):
        container_uri = container.resources[0].uri
        container_metadata = to_didl_string(container)
    else:
        container_uri = ''
        container_metadata = ''
    # Send the items in chunks of 16 URIs per AddMultipleURIsToQueue call.
    chunk_size = 16
    item_list = list(items)
    for index in range(0, len(item_list), chunk_size):
        chunk = item_list[index:(index + chunk_size)]
        # URIs and their DIDL metadata are space-separated lists.
        uris = ' '.join([item.resources[0].uri for item in chunk])
        uri_metadata = ' '.join([to_didl_string(item) for item in chunk])
        self.avTransport.AddMultipleURIsToQueue([('InstanceID', 0), ('UpdateID', 0), ('NumberOfURIs', len(chunk)), ('EnqueuedURIs', uris), ('EnqueuedURIsMetaData', uri_metadata), ('ContainerURI', container_uri), ('ContainerMetaData', container_metadata), ('DesiredFirstTrackNumberEnqueued', 0), ('EnqueueAsNext', 0)]) | Add a sequence of items to the queue.
Args:
items (list): A sequence of items to be added to the queue
container (DidlObject, optional): A container object which
includes the items. | codesearchnet |
def feed(self, data_len, feed_time=None):
    self._bytes_transferred += data_len
    self._collected_bytes_transferred += data_len
    time_now = feed_time or time.time()
    time_diff = time_now - self._last_feed_time
    # Ignore feeds that arrive faster than the minimum sampling interval;
    # their bytes stay accumulated in _collected_bytes_transferred.
    if time_diff < self._sample_min_time:
        return
    self._last_feed_time = time.time()
    # A zero-byte feed after a long gap marks the transfer as stalled.
    if data_len == 0 and time_diff >= self._stall_time:
        self._stalled = True
        return
    # Record one (elapsed, bytes) sample and reset the per-sample counter.
    self._samples.append((time_diff, self._collected_bytes_transferred))
    self._collected_bytes_transferred = 0 | Update the bandwidth meter.
Args:
data_len (int): The number of bytes transferred since the last
call to :func:`feed`.
feed_time (float): Current time. | juraj-google-style |
def user_lists(self, username, member_type="USER"):
    # Delegate the lookup to the remote service client.
    return self.client.service.getUserLists(username, member_type, self.proxy_id) | Look up all the lists that the user is a member of.
Args:
username (str): The MIT username of the user
member_type(str): The type of user, "USER" or "STRING"
Returns:
list of strings: names of the lists that this user is a member of | juraj-google-style |
def ask_for_approval(full_changeset=None, params_diff=None,
                     include_verbose=False):
    approval_options = ['y', 'n']
    if include_verbose:
        approval_options.append('v')
    approve = ui.ask("Execute the above changes? [{}] ".format(
        '/'.join(approval_options))).lower()
    if include_verbose and approve == "v":
        # 'v' logs the full changeset (and parameter diff, if any) and then
        # re-prompts with a plain y/n question (note: recursion drops args).
        if params_diff:
            logger.info(
                "Full changeset:\n\n%s\n%s",
                format_params_diff(params_diff),
                yaml.safe_dump(full_changeset),
            )
        else:
            logger.info(
                "Full changeset:\n%s",
                yaml.safe_dump(full_changeset),
            )
        return ask_for_approval()
    elif approve != "y":
        # Anything other than an explicit 'y' cancels execution.
        raise exceptions.CancelExecution | Prompt the user for approval to execute a change set.
Args:
full_changeset (list, optional): A list of the full changeset that will
be output if the user specifies verbose.
params_diff (list, optional): A list of DictValue detailing the
differences between two parameters returned by
:func:`stacker.actions.diff.diff_dictionaries`
include_verbose (bool, optional): Boolean for whether or not to include
the verbose option | juraj-google-style |
def generate_string(self, initial_logits, initial_state, sequence_length):
    current_logits = initial_logits
    current_state = initial_state
    generated_letters = []
    for _ in range(sequence_length):
        # Sample one character index from the current logits and one-hot it.
        char_index = tf.squeeze(tf.multinomial(current_logits, 1))
        char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)
        generated_letters.append(char_one_hot)
        # Feed the sampled character back through embed -> relu -> RNN core.
        gen_out_seq, current_state = self._core(
            tf.nn.relu(self._embed_module(char_one_hot)),
            current_state)
        current_logits = self._output_module(gen_out_seq)
    # Stack frames into [sequence_length, batch_size, output_size].
    generated_string = tf.stack(generated_letters)
    return generated_string | Builds sub-graph to generate a string, sampled from the model.
Args:
initial_logits: Starting logits to sample from.
initial_state: Starting state for the RNN core.
sequence_length: Number of characters to sample.
Returns:
A Tensor of characters, with dimensions `[sequence_length, batch_size,
output_size]`. | juraj-google-style |
def testDefaultBoundaryConditionsWithInnerTerm(self, default_bc):
    # PDE coefficient callbacks; x is the single spatial coordinate.
    def second_order_coeff_fn(t, coord_grid):
        del t
        x = coord_grid[0]
        return [[-(-x ** 3 + x)]]
    def first_order_coeff_fn(t, coord_grid):
        del t
        x = coord_grid[0]
        return [1 + x]
    def inner_first_order_coeff_fn(t, coord_grid):
        del t
        x = coord_grid[0]
        return [-x ** 2 + 1]
    # Dirichlet boundaries consistent with the analytic solution exp(t + x).
    @dirichlet
    def lower_boundary_fn(t, x):
        del x
        return tf.math.exp(t)
    @dirichlet
    def upper_boundary_fn(t, x):
        del x
        return tf.math.exp(1.0 + t)
    def zeroth_order_coeff_fn(t, coord_grid):
        del t
        x = coord_grid[0]
        return 2 * x ** 2 - 1 + 2 * x - (1 - x ** 2)
    grid = self.evaluate(grids.uniform_grid(minimums=[0], maximums=[1], sizes=[100], dtype=np.float64))
    initial_values = tf.math.exp(grid[0])
    time_step = 0.001
    final_t = 0.1
    # Choose which side(s) use the default boundary condition (None).
    if default_bc == 'left':
        boundary_conditions = [(None, upper_boundary_fn)]
    elif default_bc == 'right':
        boundary_conditions = [(lower_boundary_fn, None)]
    else:
        boundary_conditions = [(None, None)]
    est_values = fd_solvers.solve_forward(start_time=0, end_time=final_t, coord_grid=grid, values_grid=initial_values, time_step=time_step, boundary_conditions=boundary_conditions, second_order_coeff_fn=second_order_coeff_fn, first_order_coeff_fn=first_order_coeff_fn, inner_first_order_coeff_fn=inner_first_order_coeff_fn, zeroth_order_coeff_fn=zeroth_order_coeff_fn)[0]
    # Compare the numerical solution against the analytic one exp(t + x).
    true_values = tf.math.exp(final_t + grid[0])
    self.assertAllClose(est_values, true_values, atol=0.01, rtol=0.01) | Test for PDE with default boundary condition with inner term.
Take equation
`u_{t} - (x - x**3)[u]_{xx} + (1 + x) * [(1 - x**2) u]_{x}
+ (2 * x**2 - 1 + 2 *x - (1 - x**2))u = 0` with
boundary conditions `u_{t} + (x - 1) u_{x} = 0` at x = 0
and `u(t, 1) = exp(t + 1)`, and an initial condition `u(0, x) = exp(x)`.
Solve this equation and compare the result to `u(t, x) = exp(t + x)`.
Args:
default_bc: A string to indicate which boundary condition is 'default'.
Can be either 'left', 'right', or 'both'. | github-repos |
def close_stream_transport(self, stream_transport, timeout):
    with self._stream_transport_map_lock:
        if (stream_transport.local_id in self._stream_transport_map):
            del self._stream_transport_map[stream_transport.local_id]
            # Ack the close to the remote end if it ever assigned an id.
            if stream_transport.remote_id:
                self.transport.write_message(adb_message.AdbMessage('CLSE', stream_transport.local_id, stream_transport.remote_id), timeout)
            return True
        return False | Remove the given stream transport's id from our map of id's.
If the stream id is actually removed, we send a CLSE message to let the
remote end know (this happens when we are ack'ing a CLSE message we
received). The ADB protocol doesn't say this is a requirement, but ADB
does it, so we do too.
Args:
stream_transport: The stream transport to close.
timeout: Timeout on the operation.
Returns:
True if the id was removed and message sent, False if it was already
missing from the stream map (already closed). | codesearchnet |
def _check_mr_state(cls, state, mr_id):
    # Missing state record: the job disappeared, so the task is dropped.
    if (state is None):
        logging.warning('Mapreduce State for job %s is missing. Dropping Task.', mr_id)
        return False
    # Inactive job: treated as a spurious/late task execution, also dropped.
    if (not state.active):
        logging.warning('Mapreduce %s is not active. Looks like spurious task execution. Dropping Task.', mr_id)
        return False
    return True | Check MapreduceState.
Args:
state: an MapreduceState instance.
mr_id: mapreduce id.
Returns:
True if state is valid. False if not and this task should be dropped. | codesearchnet |
def get_compression_type_string(cls, options):
    # Normalize the several accepted spellings to one canonical string.
    if not options:
        return ''
    elif isinstance(options, TFRecordOptions):
        # Unwrap the options object and recurse on its compression_type.
        return cls.get_compression_type_string(options.compression_type)
    elif isinstance(options, TFRecordCompressionType):
        return cls.compression_type_map[options]
    elif options in TFRecordOptions.compression_type_map:
        return cls.compression_type_map[options]
    elif options in TFRecordOptions.compression_type_map.values():
        # Already a canonical string such as 'ZLIB' or 'GZIP'.
        return options
    else:
        raise ValueError('Not a valid compression_type: "{}"'.format(options)) | Convert various option types to a unified string.
Args:
options: `TFRecordOption`, `TFRecordCompressionType`, or string.
Returns:
Compression type as string (e.g. `'ZLIB'`, `'GZIP'`, or `''`).
Raises:
ValueError: If compression_type is invalid. | github-repos |
def _render_trajectories(self,
                         trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None:
    # Rendering only happens in verbose mode.
    if self._verbose:
        non_fluents, initial_state, states, actions, interms, rewards = trajectories
        shape = states[0][1].shape
        batch_size, horizon, = shape[0], shape[1]
        # Keep only the first batch element of every fluent trajectory.
        states = [(s[0], s[1][0]) for s in states]
        interms = [(f[0], f[1][0]) for f in interms]
        actions = [(a[0], a[1][0]) for a in actions]
        rewards = np.reshape(rewards, [batch_size, horizon])[0]
        self._render_batch(non_fluents, states, actions, interms, rewards) | Prints the first batch of simulated `trajectories`.
Args:
trajectories: NonFluents, states, actions, interms and rewards. | juraj-google-style |
def is_supported(cls, desc):
    # Supported as soon as any enum member's matcher accepts the descriptor.
    for m in cls:
        if m.matches(desc):
            return True
    return False | Determines if the given metric descriptor is supported.
Args:
desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the
metric descriptor to test
Return:
`True` if desc is supported, otherwise `False` | juraj-google-style |
def make_group_index(self, groupby_cols, bool_arr):
    (factor_list, values_list) = self.factorize_groupby_cols(groupby_cols)
    # No groupby columns: a single all-zero factor groups everything as 'Total'.
    if (len(factor_list) == 0):
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
        carray_values = ['Total']
    # Single column: its factorization is the group index directly.
    elif (len(factor_list) == 1):
        carray_factor = factor_list[0]
        carray_values = values_list[0]
    # Multiple columns with a valid on-disk cache: load the cached factor/values.
    elif self.group_cache_valid(col_list=groupby_cols):
        col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
        col_factor_rootdir = (col_rootdir + '.factor')
        carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
        col_values_rootdir = (col_rootdir + '.values')
        carray_values = bcolz.carray(rootdir=col_values_rootdir)
    else:
        # Multiple columns, no cache: build the combined group factor now.
        (carray_factor, carray_values) = self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
    nr_groups = len(carray_values)
    skip_key = None
    if (bool_arr is not None):
        # Apply the boolean filter: filtered-out rows get factor value -1,
        # then re-factorize so -1 becomes a dedicated skippable group key.
        tmp_rootdir = self.create_tmp_rootdir()
        carray_factor = bcolz.eval('(factor + 1) * bool - 1', user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
        tmp_rootdir = self.create_tmp_rootdir()
        labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
        (carray_factor, values) = ctable_ext.factorize(carray_factor, labels)
        filter_check = [key for (key, value) in values.items() if (value == (- 1))]
        if filter_check:
            skip_key = filter_check[0]
        nr_groups = len(values)
    # No filtered group: use an out-of-range key so nothing is skipped.
    if (skip_key is None):
        skip_key = nr_groups
    return (carray_factor, nr_groups, skip_key) | Create unique groups for groupby loop
Args:
factor_list:
values_list:
groupby_cols:
bool_arr:
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key) | codesearchnet |
def _get_environment_updates(self, display_all_distributions=False):
    updates = []
    for distribution in self.pip.get_installed_distributions():
        # Map of parsed version -> version string available upstream.
        versions = self.get_available_versions(distribution.project_name)
        max_version = max(versions.keys()) if versions else UNKNOW_NUM
        update = None
        distribution_version = self._parse_version(distribution.version)
        # Newer version available upstream.
        if versions and max_version > distribution_version:
            update = Update(
                distribution.project_name,
                distribution.version,
                versions[max_version],
                prelease=max_version[-1]
            )
        # Up to date, reported only when showing all distributions.
        elif (
            display_all_distributions and
            max_version == distribution_version
        ):
            update = Update(
                distribution.project_name,
                distribution.version,
                versions[max_version],
            )
        # No upstream info at all, again only when showing everything.
        elif display_all_distributions:
            update = Update(
                distribution.project_name,
                distribution.version,
                UNKNOWN
            )
        if update:
            updates.append(update)
    return sorted(updates, key=lambda x: x.name) | Check all pacakges installed in the environment to see if there are
any updates available.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date. Defaults to ``False``.
Returns:
list: A list of Update objects ordered based on ``instance.name``. | juraj-google-style |
def timestampFormat(self, timestampFormat):
    # NOTE(review): raises TypeError although the documented behavior
    # mentions AssertionError — confirm which one callers rely on.
    if not isinstance(timestampFormat, str):
        raise TypeError('not of type unicode')
    self._timestampFormat = timestampFormat | Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime
Raises:
AssertionError: if timestampFormat is not of type unicode.
Args:
timestampFormat (unicode): assign timestampFormat to _timestampFormat.
Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method. | juraj-google-style |
def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20):
    # Python 2 code (print statements). Bring the interface up and start the joiner.
    print '%s call joinCommissioned' % self.port
    self.__sendCommand('ifconfig up')
    cmd = 'joiner start %s %s' %(strPSKd, self.provisioningUrl)
    print cmd
    if self.__sendCommand(cmd)[0] == "Done":
        # Poll the commissioning status for up to maxDuration seconds while a
        # background thread consumes the commissioning logs.
        maxDuration = 150
        self.joinCommissionedStatus = self.joinStatus['ongoing']
        if self.logThreadStatus == self.logStatus['stop']:
            self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,))
        t_end = time.time() + maxDuration
        while time.time() < t_end:
            if self.joinCommissionedStatus == self.joinStatus['succeed']:
                break
            elif self.joinCommissionedStatus == self.joinStatus['failed']:
                return False
            time.sleep(1)
        # Joining succeeded (or timed out): start Thread and let it settle.
        self.__sendCommand('thread start')
        time.sleep(30)
        return True
    else:
        return False | start joiner
Args:
strPSKd: Joiner's PSKd
Returns:
True: successful to start joiner
False: fail to start joiner | juraj-google-style |
def ParseForwardedIps(self, forwarded_ips):
    addresses = []
    forwarded_ips = forwarded_ips or []
    for ip in forwarded_ips:
        # Accept plain IPs and alias (CIDR) forms; a trailing '/32' denotes a
        # single address, so strip it.
        if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):
            addresses.append(ip[:-3] if ip.endswith('/32') else ip)
        else:
            self.logger.warning('Could not parse IP address: "%s".', ip)
    return addresses | Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings. | juraj-google-style |
def __init__(self, comma_compat=False):
    # When comma_compat is set, commas count as separators in addition to
    # whitespace (legacy behavior for formerly comma-separated flags).
    self._comma_compat = comma_compat
    name = 'whitespace or comma' if self._comma_compat else 'whitespace'
    BaseListParser.__init__(self, None, name) | Initializer.
Args:
comma_compat: bool - Whether to support comma as an additional separator.
If false then only whitespace is supported. This is intended only for
backwards compatibility with flags that used to be comma-separated. | juraj-google-style |
def _print_unhashable(df, columns=None):
for c in df.columns if columns is None else columns:
if df.dtypes[c] == object:
try:
df[c].apply(hash)
except TypeError:
df[c] = df[c].dropna().apply(pformat).ix[df.index]
return df | Replace unhashable values in a DataFrame with their string repr
Args:
df: DataFrame
columns: columns to replace, if necessary. Default None replaces all columns. | juraj-google-style |
def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):
    try:
        mrulist = self._ParseMRUListValue(registry_key)
    except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning('unable to parse MRUList value with error: {0!s}'.format(exception))
        return
    if (not mrulist):
        return
    values_dict = {}
    found_terminator = False
    for (entry_index, entry_letter) in enumerate(mrulist):
        # A 0 byte terminates the MRU list.
        if (entry_letter == 0):
            break
        # NOTE(review): found_terminator is never set to True in this
        # function, so this branch looks unreachable — confirm.
        if found_terminator:
            parser_mediator.ProduceExtractionWarning('found additional MRUList entries after terminator in key: {0:s}.'.format(registry_key.path))
            found_terminator = False
        entry_letter = chr(entry_letter)
        value_string = self._ParseMRUListEntryValue(parser_mediator, registry_key, entry_index, entry_letter, codepage=codepage)
        value_text = 'Index: {0:d} [MRU Value {1:s}]'.format((entry_index + 1), entry_letter)
        values_dict[value_text] = value_string
    # Emit a single event carrying all parsed MRU values, timestamped with
    # the key's last-written time.
    event_data = windows_events.WindowsRegistryEventData()
    event_data.key_path = registry_key.path
    event_data.offset = registry_key.offset
    event_data.regvalue = values_dict
    event_data.source_append = self._SOURCE_APPEND
    event = time_events.DateTimeValuesEvent(registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data) | Extract event objects from a MRUList Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage. | codesearchnet |
def pretty_plot(width=8, height=None, plt=None, dpi=None, color_cycle=('qualitative', 'Set1_9')):
    # Tick label size scales with figure width.
    ticksize = int((width * 2.5))
    golden_ratio = ((math.sqrt(5) - 1) / 2)
    if (not height):
        height = int((width * golden_ratio))
    if (plt is None):
        # Fresh plot: create the figure and install the requested palettable
        # colorbrewer palette as the color cycle.
        import matplotlib.pyplot as plt
        import importlib
        mod = importlib.import_module(('palettable.colorbrewer.%s' % color_cycle[0]))
        colors = getattr(mod, color_cycle[1]).mpl_colors
        from cycler import cycler
        plt.figure(figsize=(width, height), facecolor='w', dpi=dpi)
        ax = plt.gca()
        ax.set_prop_cycle(cycler('color', colors))
    else:
        # Reuse the caller's plot, only resizing the current figure.
        fig = plt.gcf()
        fig.set_size_inches(width, height)
    plt.xticks(fontsize=ticksize)
    plt.yticks(fontsize=ticksize)
    ax = plt.gca()
    # Title and axis-label sizes also scale with width.
    ax.set_title(ax.get_title(), size=(width * 4))
    labelsize = int((width * 3))
    ax.set_xlabel(ax.get_xlabel(), size=labelsize)
    ax.set_ylabel(ax.get_ylabel(), size=labelsize)
    return plt | Provides a publication quality plot, with nice defaults for font sizes etc.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made to an
existing plot. Otherwise, a new plot will be created.
dpi (int): Sets dot per inch for figure. Defaults to 300.
color_cycle (tuple): Set the color cycle for new plots to one of the
color sets in palettable. Defaults to a qualitative Set1_9.
Returns:
Matplotlib plot object with properly sized fonts. | codesearchnet |
def desc_from_uri(uri):
    # Drop any scheme prefix before parsing the query string.
    if ":" in uri:
        _, uri = uri.split(":", 1)
    query_string = parse_qs(urlparse(uri, 'http').query)
    # Prefer the account serial number ('sn') to pick the matching account.
    if query_string.get('sn'):
        account_serial_number = query_string['sn'][0]
        try:
            account = Account.get_accounts()[account_serial_number]
            desc = "SA_RINCON{}_{}".format(
                account.service_type, account.username)
            return desc
        except KeyError:
            pass
    # Fall back to the service id ('sid') and the first account registered
    # for that service type.
    if query_string.get('sid'):
        service_id = query_string['sid'][0]
        for service in MusicService._get_music_services_data().values():
            if service_id == service["ServiceID"]:
                service_type = service["ServiceType"]
                account = Account.get_accounts_for_service(service_type)
                if not account:
                    break
                account = account[0]
                desc = "SA_RINCON{}_{}".format(
                    account.service_type, account.username)
                return desc
    # Default desc when no account information can be derived from the uri.
    desc = 'RINCON_AssociatedZPUDN'
    return desc | Create the content of DIDL desc element from a uri.
Args:
uri (str): A uri, eg:
``'x-sonos-http:track%3a3402413.mp3?sid=2&flags=32&sn=4'``
Returns:
str: The content of a desc element for that uri, eg
``'SA_RINCON519_email@example.com'`` | juraj-google-style |
def _backspaced_single_line_animation(animation_, *args, **kwargs):
    animation_gen = animation_(*args, **kwargs)
    # Yield the first frame as-is; each later frame is preceded by enough
    # backspaces (kwargs['width'] of them) to erase the previous one.
    yield next(animation_gen)
    yield from util.concatechain(
        util.BACKSPACE_GEN(kwargs['width']), animation_gen) | Turn an animation into an automatically backspaced animation.
Args:
animation: A function that returns a generator that yields
strings for animation frames.
args: Arguments for the animation function.
kwargs: Keyword arguments for the animation function.
Returns:
the animation generator, with backspaces applied to each but the first
frame. | juraj-google-style |
def post_headline(self, name, level, message):
    # Fire-and-forget delegation to the underlying client.
    self._client.post_headline(name, level, message) | Asynchronously update the sticky headline for a service.
Args:
name (string): The name of the service
level (int): A message level in states.*_LEVEL
message (string): The user facing error message that will be stored
for the service and can be queried later. | juraj-google-style |
def energies(self, samples_like, dtype=float):
    """Compute the energy of each given sample under this polynomial.

    Args:
        samples_like: raw samples in any format accepted by as_samples().
        dtype: numpy dtype of the returned array. Defaults to the builtin
            ``float`` — ``np.float`` (an alias of ``float``) was removed in
            NumPy 1.24, so behavior is identical but no longer crashes.

    Returns:
        numpy.ndarray: one energy per sample.
    """
    samples, labels = as_samples(samples_like)
    # Map each variable label to its column index in the samples matrix.
    if labels:
        idx, label = zip(*enumerate(labels))
        labeldict = dict(zip(label, idx))
    else:
        labeldict = {}
    num_samples = samples.shape[0]
    energies = np.zeros(num_samples, dtype=dtype)
    for term, bias in self.items():
        if len(term) == 0:
            # The empty term is a constant offset applied to every sample.
            energies += bias
        else:
            # Contribution: bias * elementwise product of the term's variables.
            energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias
    return energies
return energies | The energies of the given samples.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
:obj:`numpy.ndarray`: The energies. | juraj-google-style |
def _init_global_step(self, global_step=USE_DEFAULT):
    if global_step is Supervisor.USE_DEFAULT:
        # Prefer an existing global step from the graph collection before
        # creating the default tensor.
        global_step = self._get_first_op_from_collection(ops.GraphKeys.GLOBAL_STEP)
        if global_step is None:
            global_step = self._default_global_step_tensor()
    if global_step is not None:
        # Make the chosen tensor discoverable through the graph collection.
        ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
    self._global_step = global_step | Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If set to
USE_DEFAULT, creates global_step tensor. | github-repos |
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    # Single sequence: prefix + X + suffix. Pairs are simply concatenated
    # between the same prefix/suffix tokens (no separator).
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens | Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. The special tokens depend on calling set_lang.
An SeamlessM4T sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `[src_lang_code] X [eos]`
- `decoder_input_ids`: (for decoder) `[eos, tgt_lang_code] X [eos]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. | github-repos |
def __init__(self, num_labels: int, matcher: MaskFormerHungarianMatcher, weight_dict: Dict[str, float], eos_coef: float):
    super().__init__()
    requires_backends(self, ['scipy'])
    self.num_labels = num_labels
    self.matcher = matcher
    self.weight_dict = weight_dict
    self.eos_coef = eos_coef
    # Class weights: 1.0 for every real class and eos_coef for the appended
    # "no object" class, stored as a non-trainable registered buffer.
    empty_weight = torch.ones(self.num_labels + 1)
    empty_weight[-1] = self.eos_coef
    self.register_buffer('empty_weight', empty_weight) | The MaskFormer Loss. The loss is computed very similar to DETR. The process happens in two steps: 1) we compute
hungarian assignment between ground truth masks and the outputs of the model 2) we supervise each pair of
matched ground-truth / prediction (supervise class and mask)
Args:
num_labels (`int`):
The number of classes.
matcher (`MaskFormerHungarianMatcher`):
A torch module that computes the assignments between the predictions and labels.
weight_dict (`Dict[str, float]`):
A dictionary of weights to be applied to the different losses.
eos_coef (`float`):
Weight to apply to the null class. | github-repos |
def put(f, s3_path, multipart_chunk_size_mb=500, logger=None):
    if not logger:
        logger = log.get_logger('s3')
    fname = os.path.basename(f)
    # The target key is s3_path plus the local file's basename.
    target = os.path.join(s3_path, fname)
    s3cmd_cline = 's3cmd put {} {} --multipart-chunk-size-mb {}'.format(f,
                                                                        target,
                                                                        multipart_chunk_size_mb)
    print_put_info(fname, target, logger)
    s3cmd = sp.Popen(s3cmd_cline,
                     stdout=sp.PIPE,
                     stderr=sp.PIPE,
                     shell=True)
    # Block until the upload finishes; the captured output is discarded.
    stdout, stderr = s3cmd.communicate() | Uploads a single file to S3, using s3cmd.
Args:
f (str): Path to a single file.
s3_path (str): The S3 path, with the filename omitted. The S3 filename
will be the basename of the ``f``. For example::
put(f='/path/to/myfile.tar.gz', s3_path='s3://my_bucket/path/to/')
will result in an uploaded S3 path of ``s3://my_bucket/path/to/myfile.tar.gz`` | juraj-google-style |
def anchored_pairs(self, anchor):
    pairs = OrderedDict()
    for term in self.keys:
        score = self.get_pair(anchor, term)
        # Keep only terms with a truthy (nonzero/non-None) score.
        if score: pairs[term] = score
    return utils.sort_dict(pairs) | Get distances between an anchor term and all other terms.
Args:
anchor (str): The anchor term.
Returns:
OrderedDict: The distances, in descending order. | juraj-google-style |
def add_chain(self, name, order):
    # NOTE(review): the membership test uses `in` but insertion uses
    # setattr — assumes self.chains supports both; confirm its semantics.
    if name not in self.chains:
        setattr(self.chains, name, MarkovChain(order=order))
    else:
        raise ValueError("Chain with this name already exists") | Add chain to current shelve file
Args:
name: chain name
order: markov chain order | juraj-google-style |
def update_shared_file(self,
                       sharekey=None,
                       title=None,
                       description=None):
    """Update the editable details (title and/or description) of a SharedFile.

    Args:
        sharekey (str): Sharekey of the SharedFile to update.
        title (Optional[str]): Title of the SharedFile.
        description (Optional[str]): Description of the SharedFile.

    Returns:
        SharedFile on success, 404 on Sharekey not found, 403 on
        unauthorized.

    Raises:
        Exception: if no sharekey is given, or neither title nor
            description is given.
    """
    if not sharekey:
        raise Exception(
            "You must specify a sharekey for the sharedfile"
            "you wish to update.")
    if not (title or description):
        raise Exception("You must specify a title or description.")

    # Keep only the fields the caller actually provided.
    post_data = {field: value
                 for field, value in (('title', title),
                                      ('description', description))
                 if value}

    endpoint = '/api/sharedfile/{0}'.format(sharekey)
    data = self._make_request('POST', endpoint=endpoint, data=post_data)
    return SharedFile.NewFromJSON(data)
SharedFile.
Args:
sharekey (str): Sharekey of the SharedFile to update.
title (Optional[str]): Title of the SharedFile.
description (Optional[str]): Description of the SharedFile
Returns:
SharedFile on success, 404 on Sharekey not found, 403 on
unauthorized. | juraj-google-style |
def easeInBack(n, s=1.70158):
    """A tween that backs up past the start before moving to the destination.

    Args:
        n (float): The time progress, starting at 0.0 and ending at 1.0.
        s (float): Overshoot amount; larger values back up further.

    Returns:
        float: The eased progress, starting at 0.0 and ending at 1.0.
    """
    _checkRange(n)
    overshoot = (s + 1) * n - s
    return n * n * overshoot
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
s (float): The amount of overshoot past the start; larger values back up further. Defaults to 1.70158.
Returns:
(float) The eased progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
def _calculate_scores(self, query, key):
    """Calculates attention scores as a nonlinear sum of query and key.

    Args:
        query: Query tensor of shape `(batch_size, Tq, dim)`.
        key: Key tensor of shape `(batch_size, Tv, dim)`.

    Returns:
        Tensor of shape `(batch_size, Tq, Tv)`.
    """
    # Insert broadcast axes so query/key add to shape (batch, Tq, Tv, dim).
    expanded_query = ops.expand_dims(query, axis=-2)
    expanded_key = ops.expand_dims(key, axis=-3)
    if self.use_scale:
        scale = self.scale
    else:
        scale = 1.0
    return ops.sum(scale * ops.tanh(expanded_query + expanded_key), axis=-1)
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`. | github-repos |
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
    """Computes per-time-step probability ratios exp(logp_new - logp_old).

    Args:
        p_new: ndarray of shape [B, T+1, A] of log-probabilities the policy
            assigns to all actions at each step, under the new parameters.
        p_old: ndarray of shape [B, T+1, A], same but under old parameters.
        actions: ndarray of shape [B, T] with entries in [0, A).
        reward_mask: ndarray of shape [B, T] masking over probabilities.

    Returns:
        ndarray of shape [B, T] of masked probability ratios, where
        ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}.
    """
    batch, horizon = actions.shape
    assert (batch, horizon + 1) == p_old.shape[:2]
    assert (batch, horizon + 1) == p_new.shape[:2]
    logp_old = chosen_probabs(p_old, actions)
    logp_new = chosen_probabs(p_new, actions)
    assert (batch, horizon) == logp_old.shape
    assert (batch, horizon) == logp_new.shape
    # Ratio of probabilities == exp of difference of log-probabilities.
    ratios = np.exp(logp_new - logp_old) * reward_mask
    assert (batch, horizon) == ratios.shape
    return ratios
Args:
p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy
network assigns to all the actions at each time-step in each batch using
the old parameters.
p_old: ndarray of shape [B, T+1, A], same as above, but using old policy
network parameters.
actions: ndarray of shape [B, T] where each element is from [0, A).
reward_mask: ndarray of shape [B, T] masking over probabilities.
Returns:
probab_ratios: ndarray of shape [B, T], where
probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}} | codesearchnet |
def _SendItem(self, zmq_socket, item, block=True):
    """Attempts to send an item to a ZeroMQ socket.

    Args:
        zmq_socket (zmq.Socket): used to the send the item.
        item (object): sent on the queue. Will be pickled prior to sending.
        block (Optional[bool]): whether the push should be performed in
            blocking or non-blocking mode.

    Returns:
        bool: whether the item was sent successfully.
    """
    try:
        logger.debug('{0:s} sending item'.format(self.name))
        if block:
            zmq_socket.send_pyobj(item)
        else:
            # DONTWAIT makes the send raise zmq.error.Again immediately if
            # the socket cannot accept the item right now.
            zmq_socket.send_pyobj(item, zmq.DONTWAIT)
        logger.debug('{0:s} sent item'.format(self.name))
        return True
    except zmq.error.Again:
        # Non-blocking send could not complete; falls through to the
        # failure return below.
        logger.debug('{0:s} could not send an item'.format(self.name))
    except zmq.error.ZMQError as exception:
        # Only EINTR is logged as an error; other ZMQ errors are treated as
        # a plain failed send.
        if (exception.errno == errno.EINTR):
            logger.error('ZMQ syscall interrupted in {0:s}.'.format(self.name))
    return False
Args:
zmq_socket (zmq.Socket): used to the send the item.
item (object): sent on the queue. Will be pickled prior to sending.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Returns:
bool: whether the item was sent successfully. | codesearchnet |
def restore(self, file_prefix: tensor_lib.Tensor, options: 'checkpoint_options.CheckpointOptions | None'=None) -> Mapping[str, ops.Operation]:
    """Restore the saveable objects from a checkpoint with `file_prefix`.

    Args:
        file_prefix: A string or scalar string Tensor containing the prefix
            for files to read from.
        options: Optional `CheckpointOptions` object.

    Returns:
        When not run eagerly or when saving on a single device, returns a
        dictionary mapping from SaveableObject names to restore operations;
        otherwise, returns an empty dict.
    """
    options = options or checkpoint_options.CheckpointOptions()

    def restore_fn() -> Mapping[str, ops.Operation]:
        # Tensors restored so far, grouped by the restore function that will
        # consume them: restore_fn -> {ckpt_key: tensor | {slice_spec: tensor}}.
        restore_fn_inputs = {}
        # Countdown of how many checkpoint keys each restore function still
        # needs; the function fires once its count reaches zero.
        restore_fn_input_count = {fn: len(keys) for fn, keys in self._restore_fn_to_keys.items()}
        restore_ops = {}
        # Read each shard on the task (device) that owns its tensors.
        for task, shard in self._shardable_tensors_by_task.items():
            with ops.device(task):
                restored_tensor_dict = _single_shard_restore(file_prefix, shard, options)
                for ckpt_key, slice_and_tensor in restored_tensor_dict.items():
                    for slice_spec, tensor in slice_and_tensor.items():
                        restore_fn = self._keys_to_restore_fn[ckpt_key, slice_spec]
                        # Sliced tensors are nested one level deeper, keyed
                        # by their slice spec.
                        if slice_spec:
                            restore_fn_inputs.setdefault(restore_fn, {}).setdefault(ckpt_key, {})[slice_spec] = tensor
                        else:
                            restore_fn_inputs.setdefault(restore_fn, {})[ckpt_key] = tensor
                        restore_fn_input_count[restore_fn] -= 1
                        if restore_fn_input_count[restore_fn] == 0:
                            # All inputs for this restore function have been
                            # read; strip global prefixes from the keys and
                            # invoke it.
                            restored_tensors = {}
                            for ckpt_key, tensor in restore_fn_inputs[restore_fn].items():
                                restored_tensors[trackable_utils.extract_local_name(ckpt_key)] = tensor
                            ret = restore_fn(restored_tensors)
                            if isinstance(ret, dict):
                                restore_ops.update(ret)
        # Run any registered (custom) savers' restore functions as well.
        for _, (_, restore_fn) in self._registered_savers.items():
            restore_fn(file_prefix)
        return restore_ops

    has_custom_device_saver = False
    for sts in self._shardable_tensors_by_task.values():
        if any([context.is_custom_device(st.device.to_string()) for st in sts]):
            has_custom_device_saver = True
            break
    # Eager multi-device (or custom-device) restores are wrapped in a
    # tf.function so per-device ops are grouped and dispatched together.
    if context.executing_eagerly() and (self._num_unique_tasks > 1 or has_custom_device_saver):

        @def_function.function(jit_compile=False, autograph=False)
        def tf_function_restore() -> Mapping[str, ops.Operation]:
            restore_fn()
            return {}

        restore_ops = tf_function_restore()
    else:
        restore_ops = restore_fn()
    return restore_ops
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
options: Optional `CheckpointOptions` object.
Returns:
When not run eagerly or when saving on a single device, returns a
dictionary mapping from SaveableObject names to restore operations;
otherwise, returns an empty dict. | github-repos |
def _get_latest_eval_step_value(update_ops):
    """Gets the eval step `Tensor` value after running `update_ops`.

    Args:
        update_ops: A list of `Tensors` or a dictionary of names to
            `Tensors`, which are run before reading the eval step value.

    Returns:
        A `Tensor` representing the value for the evaluation step.
    """
    # Normalize a name->tensor dict into a plain list of ops to depend on.
    ops_to_run = list(update_ops.values()) if isinstance(update_ops, dict) else update_ops
    with ops.control_dependencies(ops_to_run):
        return array_ops.identity(_get_or_create_eval_step().read_value())
Args:
update_ops: A list of `Tensors` or a dictionary of names to `Tensors`, which
are run before reading the eval step value.
Returns:
A `Tensor` representing the value for the evaluation step. | github-repos |
def economic_svd(G, epsilon=sqrt(finfo(float).eps)):
    r"""Economic Singular Value Decomposition.

    Computes the thin SVD ``G = U @ diag(S) @ V`` and discards components
    whose singular values fall below ``epsilon``.

    Args:
        G (array_like): Matrix to be factorized.
        epsilon (float): Threshold on the square root of the eigen values.
            Default is ``sqrt(finfo(float).eps)``.

    Returns:
        :class:`numpy.ndarray`: Unitary matrix (kept columns).
        :class:`numpy.ndarray`: Retained singular values.
        :class:`numpy.ndarray`: Unitary matrix (kept rows).

    See Also
    --------
    numpy.linalg.svd : Singular value decomposition.
    scipy.linalg.svd : Singular value decomposition.
    """
    from scipy.linalg import svd

    G = asarray(G, float)
    U, S, V = svd(G, full_matrices=False, check_finite=False)
    # Keep only components whose singular value clears the threshold.
    ok = S >= epsilon
    # Fixed invalid subscript syntax: column/row selection requires plain
    # slice indexing (U[:, ok]), not a tuple containing a slice literal.
    return U[:, ok], S[ok], V[ok, :]
Args:
G (array_like): Matrix to be factorized.
epsilon (float): Threshold on the square root of the eigen values.
Default is ``sqrt(finfo(float).eps)``.
Returns:
:class:`numpy.ndarray`: Unitary matrix.
:class:`numpy.ndarray`: Singular values.
:class:`numpy.ndarray`: Unitary matrix.
See Also
--------
numpy.linalg.svd : Cholesky decomposition.
scipy.linalg.svd : Cholesky decomposition. | codesearchnet |
def convert(self, vroot, entry_variables):
    """Copy the graph, folding inference-time BatchNormalization into the
    preceding inner-product (convolution/affine) function where possible.

    Args:
        vroot (:obj:`Variable`): NNabla Variable at the root of the graph.
        entry_variables (:obj:`Variable`): Entry variable from which the
            conversion starts.

    Returns:
        The output variable of the converted graph.
    """
    self.graph_info = GraphInfo(vroot)
    self.entry_variables = entry_variables

    with nn.parameter_scope(self.name):
        # Walk functions in graph order, deciding per function whether to
        # fold, skip, or copy it unchanged.
        for t, func in enumerate(self.graph_info.funcs):
            if func.name == "BatchNormalization":
                i0 = func.inputs[0]
                bn_func = func
                # Only inference-mode BN (batch_stat=False) is foldable.
                if bn_func.info.args["batch_stat"] == False:
                    # Skip the BN here; it is absorbed when its
                    # inner-product producer is visited below.
                    if i0.parent.info.type_name in self.inner_prod_functions:
                        nn.logger.info("{} is skipped.".format(func.name))
                        continue
            if func.name in self.inner_prod_functions:
                inner_prod_func = func
                o0 = inner_prod_func.outputs[0]
                fs = self.graph_info.variable_to_funcs[o0]
                # Fold only when the sole consumer of the inner product's
                # output is an inference-mode BatchNormalization.
                if fs is not None and len(fs) == 1:
                    bn_func = fs[0]
                    if bn_func.name == "BatchNormalization":
                        if bn_func.info.args["batch_stat"] == False:
                            nn.logger.info("BatchNormalization parameters are folded to "
                                           "the preceding convolution.")
                            o = self._inner_prod_bn_conversion(
                                inner_prod_func, bn_func)
                            continue
            # Default path: copy the function unchanged.
            o = self._identity_conversion(func)

    self.end_variable = o
    return self.end_variable
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | juraj-google-style |
def is_blast_result_trunc(qstart, qend, sstart, send, qlen, slen):
    """Check if a query sequence is truncated by the end of a subject sequence.

    Args:
        qstart (int): Query sequence start index (1-based).
        qend (int): Query sequence end index (1-based).
        sstart (int): Subject sequence start index (1-based).
        send (int): Subject sequence end index (1-based).
        qlen (int): Query sequence length.
        slen (int): Subject sequence length.

    Returns:
        bool: True if the query match is partial and the subject alignment
        touches either end of the subject sequence.
    """
    aligned_query_len = abs(qend - qstart) + 1
    # A full-length query match cannot be truncated.
    if aligned_query_len >= qlen:
        return False
    subject_lo, subject_hi = sorted((sstart, send))
    return subject_hi >= slen or subject_lo <= 1
Args:
qstart (int): Query sequence start index
qend (int): Query sequence end index
sstart (int): Subject sequence start index
send (int): Subject sequence end index
qlen (int): Query sequence length
slen (int): Subject sequence length
Returns:
bool: Result truncated by subject sequence end? | juraj-google-style |
def subscribe(self, clock_name: str=None, clock_slots: Iterable[str]=None, subscriptions: Dict[str, Any]=None):
    """Subscribes this Area to the given Areas and optionally given Slots.

    Must be called before the Area is run.

    Args:
        clock_name: The name of the Area that is used as synchronizing Clock.
        clock_slots: The slots of the Clock relevant to this Area.
        subscriptions: A dictionary containing the relevant Areas names as
            keys and optionally the Slots as values.
    """
    # The original default was a mutable `{}` that this method mutates in
    # place, so state leaked between calls. Use None and build a fresh dict.
    if subscriptions is None:
        subscriptions = {}
    for area in subscriptions:
        init_full(self, area, subscriptions[area])
        subscriptions[area] = {'slots': subscriptions[area]}
    if clock_name is not None:
        self.clock_name = clock_name
        self.clock_slots = clock_slots
        # The clock only ever needs its latest tick, hence buffer length 1.
        subscriptions[clock_name] = {'slots': clock_slots, 'buffer-length': 1}
    self.setup(puller=True, subscriptions=subscriptions)
Args:
clock_name: The name of the Area that is used as synchronizing Clock.
clock_slots: The slots of the Clock relevant to this Area.
subscriptions: A dictionary containing the relevant Areas names as keys and optionally the Slots as values. | juraj-google-style |
def get_list_of_concatenated_objects(obj, dot_separated_name,
                                     lst=None):
    """Collect obj and every object reachable by repeatedly following
    dot_separated_name, expanding lists along the way.

    The result contains obj, obj.<name>, (obj.<name>).<name>, ... .

    Args:
        obj: the starting point
        dot_separated_name: "the search path" (applied recursively)
        lst: the initial list (e.g. [])

    Returns:
        the filled list (if one single object is requested, a list with one
        entry is returned).
    """
    from textx.scoping import Postponed
    collected = [] if lst is None else lst
    # Stop on missing objects and on cycles (already-collected objects).
    if not obj:
        return collected
    if obj in collected:
        return collected
    collected.append(obj)
    # Unresolved (postponed) references cannot be followed any further.
    if type(obj) is Postponed:
        return collected
    referenced = get_referenced_object(None, obj, dot_separated_name)
    # Normalize to a list so single objects and lists share one code path.
    targets = referenced if type(referenced) is list else [referenced]
    for target in targets:
        collected = get_list_of_concatenated_objects(
            target, dot_separated_name, collected)
    return collected
return lst | get a list of the objects consisting of
- obj
- obj+"."+dot_separated_name
- (obj+"."+dot_separated_name)+"."+dot_separated_name (called recursively)
Note: lists are expanded
Args:
obj: the starting point
dot_separated_name: "the search path" (applied recursively)
lst: the initial list (e.g. [])
Returns:
the filled list (if one single object is requested, a list with one
entry is returned). | juraj-google-style |
def check_for_empty_defaults(status):
    """Check for unused ansible-galaxy scaffolding in the current role.

    ansible-galaxy scaffolds vars/handlers/defaults/tasks/files/templates
    directories; any left untouched should be removed. An untouched main.yml
    contains only the '---' document marker plus one scaffold comment such
    as '# defaults file for myrole'.

    Args:
        status (list): list of pre-receive check failures to eventually print
            to the user.

    Returns:
        status list of current pre-receive check failures. Might be an empty
        list.
    """
    dirs_to_check = ('./vars', './handlers', './defaults', './tasks')
    for (dirpath, dirname, filename) in os.walk('.'):
        if (dirpath == './files') or (dirpath == './templates'):
            # Static-content scaffolding dirs must not be left empty.
            if not any([dirname, filename]):
                status.append('There are no files in the {0} directory. '
                              'please remove directory'.format(dirpath))
        if dirpath in dirs_to_check:
            try:
                joined_filename = os.path.join(dirpath, 'main.yml')
                with open(joined_filename, 'r') as f:
                    # NOTE(review): this check line was truncated in the
                    # extracted source; reconstructed to match a file whose
                    # whole content is '---' plus one scaffold comment line,
                    # per the behavior described in this function's docs.
                    if re.match(r'^---\n#[^\n]*\n?$', f.read()):
                        status.append('Empty file, please remove file and '
                                      'directory: {0}'.format(joined_filename))
            except IOError:
                # Directory without a main.yml is fine; nothing to flag.
                pass
    return status
When a role is created using ansible-galaxy it creates a default
scaffolding structure. Best practice dictates that if any of these are not
used then they should be removed. For example a bare main.yml with the
following string is created for a 'defaults' for a role called 'myrole':
---
defaults file for myrole
This should be removed.
Args:
status (list): list of pre-receive check failures to eventually print
to the user
Returns:
status list of current pre-receive check failures. Might be an empty
list.
def _ParseNoHeaderSingleLine(self, parser_mediator, structure):
    """Parses an isolated log line, reusing the previous event's timestamp.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        structure (pyparsing.ParseResults): structure of tokens derived from
            a line of a text file.
    """
    # Without a preceding event there is no timestamp to attach the line to.
    if not self._last_event_data:
        logger.debug('SkyDrive, found isolated line with no previous events')
        return

    isolated_event_data = SkyDriveOldLogEventData()
    isolated_event_data.offset = self._last_event_data.offset
    isolated_event_data.text = structure.text

    date_time_event = time_events.DateTimeValuesEvent(
        self._last_date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(
        date_time_event, isolated_event_data)

    # Each stored timestamp may only be borrowed by one isolated line.
    self._last_date_time = None
    self._last_event_data = None
self._last_event_data = None | Parse an isolated header line and store appropriate attributes.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file. | juraj-google-style |
def kill_log_monitor(self, check_alive=True):
    """Kill the log monitor.

    Args:
        check_alive (bool): Raise an exception if the process was already
            dead.
    """
    # Delegates to the generic process killer with the log monitor's type.
    self._kill_process_type(ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive)
Args:
check_alive (bool): Raise an exception if the process was already
dead. | codesearchnet |
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    serialized = dumps(graphs, triples=triples, cls=cls, **kwargs)
    # A filename (no write() method) is opened here; a file-like object is
    # written to directly and left open for the caller.
    if not hasattr(file, 'write'):
        with open(file, 'w') as stream:
            print(serialized, file=stream)
    else:
        print(serialized, file=file)
Args:
graphs: an iterable of Graph objects
file: a filename or file-like object to write to
triples: if True, write graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls* | juraj-google-style |
def apply_to_miz(self, miz):
    """Applies this weather to an opened Miz file (the mission is mutated).

    Args:
        miz: source miz

    Returns:
        True
    """
    report = ['Building mission with weather:']

    # Wind: ground level is taken as-is; higher altitudes use a direction
    # randomized around the base wind and speeds scaled off ground speed.
    miz.mission.weather.wind_at_ground_level_dir = self.wind_at_ground_level_dir
    miz.mission.weather.wind_at_ground_level_speed = self.wind_at_ground_level_speed
    miz.mission.weather.wind_at2000_dir = self._randomize_direction(self.wind_dir, 40)
    miz.mission.weather.wind_at2000_speed = self._randomize_speed(5 + self.wind_at_ground_level_speed * 2)
    miz.mission.weather.wind_at8000_dir = self._randomize_direction(self.wind_dir, 80)
    miz.mission.weather.wind_at8000_speed = self._randomize_speed(10 + self.wind_at_ground_level_speed * 3)
    miz.mission.weather.turbulence_at_ground_level = self.turbulence
    _ground = f'{miz.mission.weather.wind_at_ground_level_dir}/{miz.mission.weather.wind_at_ground_level_speed}'
    _at2000 = f'{miz.mission.weather.wind_at2000_dir}/{miz.mission.weather.wind_at2000_speed}'
    _at8000 = f'{miz.mission.weather.wind_at8000_dir}/{miz.mission.weather.wind_at8000_speed}'
    _turbulence = f'{miz.mission.weather.turbulence_at_ground_level}'
    wind = f'Wind:' \
           f'\n\tGround: {_ground}' \
           f'\n\t2000m: {_at2000}' \
           f'\n\t8000m: {_at8000}' \
           f'\n\tTurbulence: {_turbulence}'
    report.append(wind)

    # Atmosphere type 0 is the static (non-dynamic) atmosphere model.
    miz.mission.weather.atmosphere_type = 0
    miz.mission.weather.qnh = self.qnh
    report.append(f'Atmosphere type: {miz.mission.weather.atmosphere_type}')
    report.append(f'QNH: {miz.mission.weather.qnh}')

    # Visibility and fog; fog thickness is fixed when fog is enabled.
    miz.mission.weather.visibility = self.visibility
    if self.fog_vis:
        miz.mission.weather.fog_thickness = 1000
        miz.mission.weather.fog_visibility = self.fog_vis
        miz.mission.weather.fog_enabled = True
    else:
        miz.mission.weather.fog_enabled = False
        miz.mission.weather.fog_visibility = 0
        miz.mission.weather.fog_thickness = 0
    visibility = f'Visibility: {miz.mission.weather.visibility}' \
                 f'\n\tFog: {"yes" if miz.mission.weather.fog_enabled else "no"}' \
                 f'\n\tFog thickness: {miz.mission.weather.fog_thickness}' \
                 f'\n\tFog visibility: {miz.mission.weather.fog_visibility}'
    report.append(visibility)

    miz.mission.weather.temperature = self.temperature
    report.append(f'Temperature: {self.temperature}°C')

    # Clouds: density may be forced upward but is never lowered.
    miz.mission.weather.cloud_density = max(self.force_cloud_density, self.cloud_density)
    miz.mission.weather.cloud_thickness = self.cloud_thickness
    miz.mission.weather.cloud_base = self.cloud_base
    miz.mission.weather.precipitations = self.precipitations
    clouds = f'Clouds:' \
             f'\n\tClouds density: {miz.mission.weather.cloud_density}' \
             f'\n\tClouds thickness: {miz.mission.weather.cloud_thickness}' \
             f'\n\tClouds base: {miz.mission.weather.cloud_base}' \
             f'\n\tPrecipitations: {miz.mission.weather.precipitations}'
    report.append(clouds)

    LOGGER.debug('applying weather: %s', report)
    return True
Args:
miz: source miz
Returns: True | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.