code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def __init__(self, sink):
super().__init__()
self.sink = sink | Initializes a Write transform.
Args:
sink: Data sink to write to. | github-repos |
def sync(coro, timeout=None):
loop = initloop()
return asyncio.run_coroutine_threadsafe(coro, loop).result(timeout) | Schedule a coroutine to run on the global loop and return it's result.
Args:
coro (coroutine): The coroutine instance.
Notes:
This API is thread safe and should only be called by non-loop threads. | codesearchnet |
def _pywrap_tensorflow():
try:
from tensorboard.compat import notf
except ImportError:
try:
from tensorflow.python import pywrap_tensorflow
return pywrap_tensorflow
except ImportError:
pass
from tensorboard.compat.tensorflow_stub import pywrap_tensorflow
return pywrap_tensorflow | Provide pywrap_tensorflow access in TensorBoard.
pywrap_tensorflow cannot be accessed from tf.python.pywrap_tensorflow
and needs to be imported using
`from tensorflow.python import pywrap_tensorflow`. Therefore, we provide
a separate accessor function for it here.
NOTE: pywrap_tensorflow is not part of TensorFlow API and this
dependency will go away soon.
Returns:
pywrap_tensorflow import, if available.
Raises:
ImportError: if we couldn't import pywrap_tensorflow. | codesearchnet |
def parse(self, words):
def exact(words):
'If already represented as float or int, convert.'
try:
return float(words)
except:
return None
guess = exact(words)
if (guess is not None):
return guess
split = words.split(' ')
if (split[(- 1)] in self.__fractions__):
split[(- 1)] = self.__fractions__[split[(- 1)]]
elif (split[(- 1)] in self.__ordinals__):
split[(- 1)] = self.__ordinals__[split[(- 1)]]
parsed_ordinals = ' '.join(split)
return self.parseFloat(parsed_ordinals) | A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words. | codesearchnet |
def _generate_splits(self, m, r):
new_rects = []
if (r.left > m.left):
new_rects.append(Rectangle(m.left, m.bottom, (r.left - m.left), m.height))
if (r.right < m.right):
new_rects.append(Rectangle(r.right, m.bottom, (m.right - r.right), m.height))
if (r.top < m.top):
new_rects.append(Rectangle(m.left, r.top, m.width, (m.top - r.top)))
if (r.bottom > m.bottom):
new_rects.append(Rectangle(m.left, m.bottom, m.width, (r.bottom - m.bottom)))
return new_rects | When a rectangle is placed inside a maximal rectangle, it stops being one
and up to 4 new maximal rectangles may appear depending on the placement.
_generate_splits calculates them.
Arguments:
m (Rectangle): max_rect rectangle
r (Rectangle): rectangle placed
Returns:
list : list containing new maximal rectangles or an empty list | codesearchnet |
def _dump_worker_context(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._worker_context:
self._worker_context[task_type] = []
while len(self._worker_context[task_type]) <= task_id:
self._worker_context[task_type].append(None)
self._worker_context[task_type][task_id] = (context.master_target, context.num_workers, context.is_chief, context.distributed_mode) | Dumps the properties of each worker context.
It dumps the context properties to a dict mapping from task_type to a list
of tuples of master_target, num_workers, is_chief and distribute_mode, where
the list is indexed by the task_id.
Args:
strategy: a `DistributionStrategy` object. | github-repos |
def slicewise(tf_fn, xs, output_shape=None, output_dtype=None, splittable_dims=None, grad_function=None, name=None):
multiple_outputs = isinstance(output_dtype, list)
output_shapes = (output_shape if multiple_outputs else [output_shape])
output_dtypes = (output_dtype if multiple_outputs else [output_dtype])
op = SlicewiseOperation(tf_fn, xs, [(convert_to_shape(shape) or xs[0].shape) for shape in output_shapes], [(dtype or xs[0].dtype) for dtype in output_dtypes], splittable_dims, grad_function, name=name)
return (tuple(op.outputs) if multiple_outputs else op.outputs[0]) | Slice-wise call to any tensorflow function.
The output shape and dtype default to those of the first input.
splittable_dims is a list of Dimensions which can be split while keeping the
computation valid.
Args:
tf_fn: a function taking n tf.Tensors and returning a tf.Tensor
xs: a list of n Tensors
output_shape: a Shape (or list of shapes)
output_dtype: a dtype (or list of dtypes)
splittable_dims: a list of Dimensions which are ok to split
grad_function: an optional gradients function. If None, use tf gradient.
name: an optional string
Returns:
a Tensor (or a tuple of Tensors) | codesearchnet |
def get_pipeline_boxes(self, pipeline_key, sort_by = None):
if not pipeline_key:
return requests.codes.bad_request, None
uri = '/'.join([
self.api_uri,
self.pipelines_suffix,
pipeline_key
])
if sort_by:
if sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:
uri += self.sort_by_postfix + sort_by
else:
return requests.codes.bad_request, {'success' : 'False',
'error': 'sortBy needs to be \'creationTimestamp\', or \'lastUpdatedTimestamp\''}
return self._req('get', uri) | Gets a list of all box objects in a pipeline. Performs a single GET.
Args:
pipeline_key key for pipeline
sort_by in desc order by 'creationTimestamp' or 'lastUpdatedTimestamp'
Not sure if it is supported
returns (status code for the GET request, dict of boxes) | juraj-google-style |
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
hashers = cls._ParseStringOption(
options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING)
hasher_file_size_limit = cls._ParseNumericOption(
options, 'hasher_file_size_limit', default_value=0)
if hasher_file_size_limit < 0:
raise errors.BadConfigOption(
'Invalid hasher file size limit value cannot be negative.')
setattr(configuration_object, '_hasher_names_string', hashers)
setattr(
configuration_object, '_hasher_file_size_limit', hasher_file_size_limit) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation. | juraj-google-style |
def _check_if_fenced(self, name):
if (name in object.__getattribute__(self, '_attributes_to_fence')):
raise TranspilerAccessError(('The fenced %s has the property %s protected' % (type(object.__getattribute__(self, '_wrapped')), name))) | Checks if the attribute name is in the list of attributes to protect. If so, raises
TranspilerAccessError.
Args:
name (string): the attribute name to check
Raises:
TranspilerAccessError: when name is the list of attributes to protect. | codesearchnet |
def GetFileObjectByPathSpec(self, path_spec):
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
return file_entry.GetFileObject() | Retrieves a file-like object for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
FileIO: a file-like object or None if not available. | juraj-google-style |
def with_max_depth(self, max_depth):
self._options['max_depth'] = max_depth
return self | Set the maximum depth of display.
The depth depends on profiling view. For 'scope' view, it's the
depth of name scope hierarchy (tree), for 'op' view, it's the number
of operation types (list), etc.
Args:
max_depth: Maximum depth of the data structure to display.
Returns:
self | github-repos |
class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor):
def __init__(self, bos_token_id: int):
if bos_token_id < 0:
raise ValueError(f'The forced bos token id must be a non-negative integer, got {bos_token_id}')
self.bos_token_id = bos_token_id
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
if cur_len == 1:
batch_size, num_tokens = scores.shape
scores = tf.zeros((batch_size, 1))
if self.bos_token_id > 0:
scores = tf.concat((tf.broadcast_to(-float('inf'), (batch_size, self.bos_token_id)), scores), axis=-1)
if self.bos_token_id < num_tokens - 1:
scores = tf.concat((scores, tf.broadcast_to(-float('inf'), (batch_size, num_tokens - 1 - self.bos_token_id))), axis=-1)
return scores | [`TFLogitsProcessor`] that enforces the specified token as the first generated token.
Args:
bos_token_id (`int`):
The id of the token to force as the first generated token. | github-repos |
def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:
axis = []
for var in vars_list:
if var in fluent.scope.as_list():
ax = fluent.scope.index(var)
if fluent.batch:
ax += 1
axis.append(ax)
return axis | Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis. | juraj-google-style |
def _wrap_call_and_conditional_losses(layer):
layer_call = _get_layer_call_method(layer)
def call_and_return_conditional_losses(*args, **kwargs):
call_output = layer_call(*args, **kwargs)
if version_utils.is_v1_layer_or_model(layer):
conditional_losses = layer.get_losses_for(_filtered_inputs([args, kwargs]))
else:
conditional_losses = [l for l in layer.losses if not hasattr(l, '_unconditional_loss')]
return (call_output, conditional_losses)
return _create_call_fn_decorator(layer, call_and_return_conditional_losses) | Wraps call function that returns a tuple of (outputs, losses).
The losses returned are conditional on the inputs passed to the call function.
Unconditional losses (e.g. weight regularizeration) are wrapped separately.
Args:
layer: a Keras layer object
Returns:
python call function that returns outputs and conditional losses -- excludes
activity regularizer | github-repos |
def uniprot_reviewed_checker(uniprot_id):
query_string = ('id:' + uniprot_id)
uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))
uni_rev_df = pd.read_table(uni_rev_raw, sep='\t', index_col=0)
uni_rev_df = uni_rev_df.fillna(False)
uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]
uni_rev_df = uni_rev_df.replace(to_replace='reviewed', value=True)
uni_rev_df = uni_rev_df.replace(to_replace='unreviewed', value=False)
uni_rev_dict_adder = uni_rev_df.to_dict()['Status']
return uni_rev_dict_adder[uniprot_id] | Check if a single UniProt ID is reviewed or not.
Args:
uniprot_id:
Returns:
bool: If the entry is reviewed | codesearchnet |
def get_aws_unique_id(timeout=DEFAULT_AWS_TIMEOUT):
try:
resp = requests.get(AWS_ID_URL, timeout=timeout).json()
except requests.exceptions.ConnectTimeout:
_logger.warning('Connection timeout when determining AWS unique '
'ID. Not using AWS unique ID.')
return None
else:
aws_id = "{0}_{1}_{2}".format(resp['instanceId'], resp['region'],
resp['accountId'])
_logger.debug('Using AWS unique ID %s.', aws_id)
return aws_id | Determine the current AWS unique ID
Args:
timeout (int): How long to wait for a response from AWS metadata IP | juraj-google-style |
def SetRange(self, range_offset, range_size):
if self._is_open:
raise IOError('Already open.')
if range_offset < 0:
raise ValueError(
'Invalid range offset: {0:d} value out of bounds.'.format(
range_offset))
if range_size < 0:
raise ValueError(
'Invalid range size: {0:d} value out of bounds.'.format(
range_size))
self._range_offset = range_offset
self._range_size = range_size
self._current_offset = 0 | Sets the data range (offset and size).
The data range is used to map a range of data within one file
(e.g. a single partition within a full disk image) as a file-like object.
Args:
range_offset (int): start offset of the data range.
range_size (int): size of the data range.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the range offset or range size is invalid. | juraj-google-style |
def derive_temporary_python2_environment(destination_directory: str, python3_environment: PreparedEnv, verbose: bool, env_name: str='.test_virtualenv_py2', python_path: str='/usr/bin/python2.7') -> PreparedEnv:
shutil.rmtree(destination_directory)
input_directory = cast(str, python3_environment.destination_directory)
os.chdir(input_directory)
conversion_script_path = os.path.join(input_directory, 'dev_tools', 'python2.7-generate.sh')
shell_tools.run_cmd('bash', conversion_script_path, destination_directory, input_directory, python3_environment.virtual_env_path, out=sys.stderr)
os.chdir(destination_directory)
env_path = os.path.join(destination_directory, env_name)
req_path = os.path.join(destination_directory, 'requirements.txt')
dev_req_path = os.path.join(destination_directory, 'pip-list-test-tools.txt')
contrib_req_path = os.path.join(destination_directory, 'cirq', 'contrib', 'contrib-requirements.txt')
req_paths = [req_path, dev_req_path, contrib_req_path]
create_virtual_env(venv_path=env_path, python_path=python_path, requirements_paths=req_paths, verbose=verbose)
return PreparedEnv(github_repo=python3_environment.repository, actual_commit_id=python3_environment.actual_commit_id, compare_commit_id=python3_environment.compare_commit_id, destination_directory=destination_directory, virtual_env_path=env_path) | Creates a python 2.7 environment starting from a prepared python 3 one.
Args:
destination_directory: Where to put the python 2 environment.
python3_environment: The prepared environment to start from.
verbose: When set, more progress output is produced.
env_name: The name to use for the virtualenv directory.
python_path: The python binary to use.
Returns:
A description of the environment that was prepared. | codesearchnet |
def hours(start, end=None):
return iterate.between(start, datetime.timedelta(hours=1), end) | Iterate over the hours between the given datetime_tzs.
Args:
start: datetime_tz to start from.
end: (Optional) Date to end at, if not given the iterator will never
terminate.
Returns:
An iterator which generates datetime_tz objects a hour apart. | juraj-google-style |
def add_spin_by_element(self, spins):
for site in self.sites:
new_sp = {}
for (sp, occu) in site.species.items():
sym = sp.symbol
oxi_state = getattr(sp, 'oxi_state', None)
new_sp[Specie(sym, oxidation_state=oxi_state, properties={'spin': spins.get(str(sp), spins.get(sym, None))})] = occu
site.species = new_sp | Add spin states to a structure.
Args:
spisn (dict): Dict of spins associated with
elements or species, e.g. {"Ni":+5} or {"Ni2+":5} | codesearchnet |
def __init__(self, f, name, problems):
self._f = f
self._name = name
self._crlf = 0
self._crlf_examples = []
self._lf = 0
self._lf_examples = []
self._line_number = 0
self._problems = problems | Create new object.
Args:
f: file-like object to wrap
name: name to use for f. StringIO objects don't have a name attribute.
problems: a ProblemReporterBase object | juraj-google-style |
def fetch(self, virtual_account_id, data={}, **kwargs):
return super(VirtualAccount, self).fetch(
virtual_account_id,
data,
**kwargs) | Fetch Virtual Account for given Id
Args:
virtual_account_id :
Id for which Virtual Account object has to be retrieved
Returns:
Virtual Account dict for given Virtual Account Id | juraj-google-style |
def _define_loop(graph, eval_steps):
loop = tools.Loop(None, graph.step, graph.should_log, graph.do_report, graph.force_reset)
loop.add_phase('eval', graph.done, graph.score, graph.summary, eval_steps, report_every=eval_steps, log_every=None, checkpoint_every=None, feed={graph.is_training: False})
return loop | Create and configure an evaluation loop.
Args:
graph: Object providing graph elements via attributes.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object. | codesearchnet |
def load_py(stream, filepath=None):
with add_sys_paths(config.package_definition_build_python_paths):
return _load_py(stream, filepath=filepath) | Load python-formatted data from a stream.
Args:
stream (file-like object).
Returns:
dict. | codesearchnet |
def _aggregate(self, tests_results):
summary = {
"success": True,
"stat": {
"testcases": {
"total": len(tests_results),
"success": 0,
"fail": 0
},
"teststeps": {}
},
"time": {},
"platform": report.get_platform(),
"details": []
}
for tests_result in tests_results:
testcase, result = tests_result
testcase_summary = report.get_summary(result)
if testcase_summary["success"]:
summary["stat"]["testcases"]["success"] += 1
else:
summary["stat"]["testcases"]["fail"] += 1
summary["success"] &= testcase_summary["success"]
testcase_summary["name"] = testcase.config.get("name")
testcase_summary["in_out"] = utils.get_testcase_io(testcase)
report.aggregate_stat(summary["stat"]["teststeps"], testcase_summary["stat"])
report.aggregate_stat(summary["time"], testcase_summary["time"])
summary["details"].append(testcase_summary)
return summary | aggregate results
Args:
tests_results (list): list of (testcase, result) | juraj-google-style |
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = utils.BytearrayStream()
if self._asynchronous_correlation_value:
self._asynchronous_correlation_value.write(local_stream, kmip_version=kmip_version)
self.length = local_stream.length()
super(CancelRequestPayload, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer) | Write the data encoding the Cancel request payload to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined. | codesearchnet |
def get_file(self, file_name, local_destination=None, **kwargs):
if not local_destination:
local_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', '%s:%s' % (self.hostname, file_name), local_destination],
**kwargs) | Get a file from a remote host with rsync.
Args:
file_name (str): The relative location of the file on the remote
host.
local_destination (str): The destination for the file on the local
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task. | juraj-google-style |
def identity(shape, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True, fmt: str='quat') -> Rotation:
if fmt == 'rot_mat':
rot_mats = identity_rot_mats(shape, dtype, device, requires_grad)
return Rotation(rot_mats=rot_mats, quats=None)
elif fmt == 'quat':
quats = identity_quats(shape, dtype, device, requires_grad)
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
else:
raise ValueError(f'Invalid format: f{fmt}') | Returns an identity Rotation.
Args:
shape:
The "shape" of the resulting Rotation object. See documentation for the shape property
dtype:
The torch dtype for the rotation
device:
The torch device for the new rotation
requires_grad:
Whether the underlying tensors in the new rotation object should require gradient computation
fmt:
One of "quat" or "rot_mat". Determines the underlying format of the new object's rotation
Returns:
A new identity rotation | github-repos |
def _tidy_names(names, nnames, extra_names=None):
if len(names) < nnames and extra_names is not None:
names.extend(extra_names)
names.extend(range(nnames - len(names)))
del names[nnames:] | Truncate or extend names so that its len is nnames.
The list is modified, this function returns nothing.
Args:
names (list): list of names.
nnames (int): desired number of names.
extra_names (list of str): list of names to be used to extend the list
if needed. If this list isn't provided, a range is used instead. | juraj-google-style |
def action_fluent_variables(self) -> FluentParamsList:
fluents = self.domain.action_fluents
ordering = self.domain.action_fluent_ordering
return self._fluent_params(fluents, ordering) | Returns the instantiated action fluents in canonical order.
Returns:
Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name
and a list of instantiated fluents represented as strings. | codesearchnet |
def _validate_isvalid_quantity(self, isvalid_quantity, field, value):
quantity = Q_(value[0])
low_lim = (0.0 * units(property_units[field]))
try:
if (quantity <= low_lim):
self._error(field, 'value must be greater than 0.0 {}'.format(property_units[field]))
except pint.DimensionalityError:
self._error(field, ('incompatible units; should be consistent with ' + property_units[field])) | Checks for valid given value and appropriate units.
Args:
isvalid_quantity (`bool`): flag from schema indicating quantity to be checked.
field (`str`): property associated with quantity in question.
value (`list`): list whose first element is a string representing a value with units
The rule's arguments are validated against this schema:
{'isvalid_quantity': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'list'}} | codesearchnet |
def save(self, wf_state):
self.wf_state = wf_state
self.wf_state['role_id'] = self.current.role_id
self.set(self.wf_state)
if (self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS):
self.publish(job='_zops_sync_wf_cache', token=self.db_key) | write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache
Args:
wf_state dict: wf state | codesearchnet |
def get_percentile(self, percentile):
assert (0 <= percentile <= 100), 'percentile must be between 0 and 100. Got {}'.format(percentile)
return self._percentile(self._values, percentile) | Get a value representing a the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile | codesearchnet |
def get(identifier):
if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
return identifier
elif isinstance(identifier, tf_optimizer_module.Optimizer):
opt = TFOptimizer(identifier)
backend.track_tf_optimizer(opt)
return opt
elif isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, str):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
else:
raise ValueError('Could not interpret optimizer identifier: {}'.format(identifier)) | Retrieves a Keras Optimizer instance.
Args:
identifier: Optimizer identifier, one of
- String: name of an optimizer
- Dictionary: configuration dictionary. - Keras Optimizer instance (it
will be returned unchanged). - TensorFlow Optimizer instance (it
will be wrapped as a Keras Optimizer).
Returns:
A Keras Optimizer instance.
Raises:
ValueError: If `identifier` cannot be interpreted. | github-repos |
def from_private_key(account_name, private_key=None, private_key_path=None, storage=None, storage_path=None, api_version='v3', readonly=False, http_client=None, ga_hook=None):
if (not private_key):
if (not private_key_path):
raise GapyError('Must provide either a private_key or a private_key_file')
if isinstance(private_key_path, basestring):
private_key_path = open(private_key_path)
private_key = private_key_path.read()
storage = _get_storage(storage, storage_path)
scope = (GOOGLE_API_SCOPE_READONLY if readonly else GOOGLE_API_SCOPE)
credentials = SignedJwtAssertionCredentials(account_name, private_key, scope)
credentials.set_store(storage)
return Client(_build(credentials, api_version, http_client), ga_hook) | Create a client for a service account.
Create a client with an account name and a private key.
Args:
account_name: str, the account identifier (probably the account email).
private_key: str, the private key as a string.
private_key_path: str, path to a file with the private key in.
storage: oauth2client.client.Storage, a Storage implementation to store
credentials.
storage_path: str, path to a file storage.
readonly: bool, default False, if True only readonly access is requested
from GA.
http_client: httplib2.Http, Override the default http client used.
ga_hook: function, a hook that is called every time a query is made
against GA. | codesearchnet |
def _create_L_ind(self, L):
if issparse(L):
L = L.todense()
L_ind = np.zeros((self.n, self.m * self.k))
for y in range(1, self.k + 1):
L_ind[:, (y - 1) :: self.k] = np.where(L == y, 1, 0)
return L_ind | Convert a label matrix with labels in 0...k to a one-hot format
Args:
L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}
Returns:
L_ind: An [n,m*k] dense np.ndarray with values in {0,1}
Note that no column is required for 0 (abstain) labels. | juraj-google-style |
def mangle(tree, toplevel=False):
sym_table = SymbolTable()
visitor = ScopeTreeVisitor(sym_table)
visitor.visit(tree)
fill_scope_references(tree)
mangle_scope_tree(sym_table.globals, toplevel)
mangler = NameManglerVisitor()
mangler.visit(tree) | Mangle names.
Args:
toplevel: defaults to False. Defines if global
scope should be mangled or not. | juraj-google-style |
def collect_members(module_to_name):
members = {}
for module, module_name in module_to_name.items():
all_names = getattr(module, "__all__", None)
for name, member in inspect.getmembers(module):
if ((inspect.isfunction(member) or inspect.isclass(member)) and
not _always_drop_symbol_re.match(name) and
(all_names is None or name in all_names)):
fullname = '%s.%s' % (module_name, name)
if name in members:
other_fullname, other_member = members[name]
if member is not other_member:
raise RuntimeError("Short name collision between %s and %s" %
(fullname, other_fullname))
if len(fullname) == len(other_fullname):
raise RuntimeError("Can't decide whether to use %s or %s for %s: "
"both full names have length %d" %
(fullname, other_fullname, name, len(fullname)))
if len(fullname) > len(other_fullname):
continue
members[name] = fullname, member
return members | Collect all symbols from a list of modules.
Args:
module_to_name: Dictionary mapping modules to short names.
Returns:
Dictionary mapping name to (fullname, member) pairs. | juraj-google-style |
def get_and_check_tasks_for(context, task, msg_prefix=''):
tasks_for = task['extra']['tasks_for']
if (tasks_for not in context.config['valid_tasks_for']):
raise ValueError('{}Unknown tasks_for: {}'.format(msg_prefix, tasks_for))
return tasks_for | Given a parent task, return the reason the parent task was spawned.
``.taskcluster.yml`` uses this to know whether to spawn an action,
cron, or decision task definition. ``tasks_for`` must be a valid one defined in the context.
Args:
task (dict): the task definition.
msg_prefix (str): the string prefix to use for an exception.
Raises:
(KeyError, ValueError): on failure to find a valid ``tasks_for``.
Returns:
str: the ``tasks_for`` | codesearchnet |
def sparse_union_indices_and_values(x1, x2_indices, x2_values=None):
zeros2 = tf.SparseTensor(x2_indices, tf.zeros((tf.shape(x2_indices)[0],), x1.values.dtype), x1.dense_shape)
x1_for_union = tf.sparse.add(x1, zeros2)
if x2_values is not None:
x2 = tf.SparseTensor(x2_indices, x2_values, x1.dense_shape)
zeros1 = tf.sparse.map_values(tf.zeros_like, x1)
x2_for_union = tf.sparse.add(x2, zeros1)
return (x1_for_union.indices, x1_for_union.values, x2_for_union.values)
else:
return (x1_for_union.indices, x1_for_union.values, None) | Compute the indices for the union of the indices of the provided
`tf.SparseTensor`s and another set of indices and return the modified values
for these indices.
Args:
x: a `tf.SparseTensor`.
indices: another set of indices in the `tf.SparseTensor` format.
Returns: A tuple containing:
- the indices for the union
- `x1` values for the union indices (some zeros were added)
- `x2` values for the union indices (some zeros were added) or `None` if
`x2_values` was `None`. | github-repos |
def put_content(self, url, content):
cache_path = self._url_to_path(url)
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if (e.errno != errno.EEXIST):
raise Error(('Failed to create cache directories for ' % cache_path))
try:
with open(cache_path, 'wb') as f:
f.write(content)
except IOError:
raise Error(('Failed to cache content as %s for %s' % (cache_path, url))) | Stores the content of a resource into the disk cache.
Args:
url: The url of the resource
content: The content of the resource
Raises:
CacheError: If the content cannot be put in cache | codesearchnet |
def get_day_of_month_description(self):
expression = self._expression_parts[3]
expression = expression.replace('?', '*')
if (expression == 'L'):
description = _(', on the last day of the month')
elif ((expression == 'LW') or (expression == 'WL')):
description = _(', on the last weekday of the month')
else:
regex = re.compile('(\\d{1,2}W)|(W\\d{1,2})')
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace('W', ''))
day_string = (_('first weekday') if (day_number == 1) else _('weekday nearest day {0}').format(day_number))
description = _(', on the {0} of the month').format(day_string)
else:
description = self.get_segment_description(expression, _(', every day'), (lambda s: s), (lambda s: (_(', every day') if (s == '1') else _(', every {0} days'))), (lambda s: _(', between day {0} and {1} of the month')), (lambda s: _(', on day {0} of the month')))
return description | Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description | codesearchnet |
def _viz_prototype(self, vis_fn):
def _viz_logger(*args, **kwargs):
self.win = vis_fn(*args, win=self.win, env=self.env, opts=self.opts, **kwargs)
return _viz_logger | Outputs a function which will log the arguments to Visdom in an appropriate way.
Args:
vis_fn: A function, such as self.vis.image | codesearchnet |
def set_data(self, data={}, datetime_fields=[]):
if datetime_fields:
for field in datetime_fields:
if (field in data):
data[field] = self._parse_datetime(data[field])
super(CampfireEntity, self).set_data(data) | Set entity data
Args:
data (dict): Entity data
datetime_fields (array): Fields that should be parsed as datetimes | codesearchnet |
def __init__(self, channel):
self.PutObject = channel.stream_unary(
'/pfs.ObjectAPI/PutObject',
request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.Object.FromString,
)
self.PutObjectSplit = channel.stream_unary(
'/pfs.ObjectAPI/PutObjectSplit',
request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.Objects.FromString,
)
self.PutObjects = channel.stream_unary(
'/pfs.ObjectAPI/PutObjects',
request_serializer=client_dot_pfs_dot_pfs__pb2.PutObjectRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetObject = channel.unary_stream(
'/pfs.ObjectAPI/GetObject',
request_serializer=client_dot_pfs_dot_pfs__pb2.Object.SerializeToString,
response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,
)
self.GetObjects = channel.unary_stream(
'/pfs.ObjectAPI/GetObjects',
request_serializer=client_dot_pfs_dot_pfs__pb2.GetObjectsRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,
)
self.GetBlocks = channel.unary_stream(
'/pfs.ObjectAPI/GetBlocks',
request_serializer=client_dot_pfs_dot_pfs__pb2.GetBlocksRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,
)
self.TagObject = channel.unary_unary(
'/pfs.ObjectAPI/TagObject',
request_serializer=client_dot_pfs_dot_pfs__pb2.TagObjectRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.InspectObject = channel.unary_unary(
'/pfs.ObjectAPI/InspectObject',
request_serializer=client_dot_pfs_dot_pfs__pb2.Object.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.ObjectInfo.FromString,
)
self.CheckObject = channel.unary_unary(
'/pfs.ObjectAPI/CheckObject',
request_serializer=client_dot_pfs_dot_pfs__pb2.CheckObjectRequest.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.CheckObjectResponse.FromString,
)
self.ListObjects = channel.unary_stream(
'/pfs.ObjectAPI/ListObjects',
request_serializer=client_dot_pfs_dot_pfs__pb2.ListObjectsRequest.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.Object.FromString,
)
self.DeleteObjects = channel.unary_unary(
'/pfs.ObjectAPI/DeleteObjects',
request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteObjectsRequest.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.DeleteObjectsResponse.FromString,
)
self.GetTag = channel.unary_stream(
'/pfs.ObjectAPI/GetTag',
request_serializer=client_dot_pfs_dot_pfs__pb2.Tag.SerializeToString,
response_deserializer=google_dot_protobuf_dot_wrappers__pb2.BytesValue.FromString,
)
self.InspectTag = channel.unary_unary(
'/pfs.ObjectAPI/InspectTag',
request_serializer=client_dot_pfs_dot_pfs__pb2.Tag.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.ObjectInfo.FromString,
)
self.ListTags = channel.unary_stream(
'/pfs.ObjectAPI/ListTags',
request_serializer=client_dot_pfs_dot_pfs__pb2.ListTagsRequest.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.ListTagsResponse.FromString,
)
self.DeleteTags = channel.unary_unary(
'/pfs.ObjectAPI/DeleteTags',
request_serializer=client_dot_pfs_dot_pfs__pb2.DeleteTagsRequest.SerializeToString,
response_deserializer=client_dot_pfs_dot_pfs__pb2.DeleteTagsResponse.FromString,
)
self.Compact = channel.unary_unary(
'/pfs.ObjectAPI/Compact',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def delete_weight(self, object_name, weight_name):
def delete_weight_fn(weights_dict, source_name, target_name=None):
if weight_name not in weights_dict[source_name]:
raise ValueError(f'Weight {weight_name} not found in object {object_name}. Weights found: {list(weights_dict[source_name].keys())}')
weights_dict[source_name].pop(weight_name)
self._edit_object(delete_weight_fn, object_name) | Removes a weight from an existing object.
Args:
object_name: String, name or path of the
object from which to remove the weight
(e.g. `"dense_2"` or `"layers/dense_2"`).
weight_name: String, name of the weight to
delete (e.g. `"0"`). | github-repos |
def get_ext(url):
parsed = urllib.parse.urlparse(url)
root, ext = os.path.splitext(parsed.path)
return ext.lstrip('.') | Extract an extension from the url.
Args:
url (str): String representation of a url.
Returns:
str: Filename extension from a url (without a dot), '' if extension is not present. | juraj-google-style |
def load_source(source_file_path):
if os.path.isfile(source_file_path):
with open(source_file_path, 'rb') as f:
source_text = f.read().decode('utf-8')
source_lines = source_text.split('\n')
else:
source_lines = _try_load_par_source(source_file_path)
if source_lines is None:
raise IOError('Source path neither exists nor can be loaded as a .par file: %s' % source_file_path)
line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3
return (source_lines, line_num_width) | Load the content of a Python source code file.
This function covers the following case:
1. source_file_path points to an existing Python (.py) file on the
file system.
2. source_file_path is a path within a .par file (i.e., a zip-compressed,
self-contained Python executable).
Args:
source_file_path: Path to the Python source file to read.
Returns:
A length-2 tuple:
- Lines of the source file, as a `list` of `str`s.
- The width of the string needed to show the line number in the file.
This is calculated based on the number of lines in the source file.
Raises:
IOError: if loading is unsuccessful. | github-repos |
def _detect_encoding(data=None):
import locale
enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2',
'utf-16', 'cp720']
code = locale.getpreferredencoding(False)
if data is None:
return code
if code.lower() not in enc_list:
enc_list.insert(0, code.lower())
for c in enc_list:
try:
for line in data:
line.decode(c)
except (UnicodeDecodeError, UnicodeError, AttributeError):
continue
return c
print("Encoding not detected. Please pass encoding value manually") | Return the default system encoding. If data is passed, try
to decode the data with the default system encoding or from a short
list of encoding types to test.
Args:
data - list of lists
Returns:
enc - system encoding | juraj-google-style |
def merge_single_qubit_gates_into_phased_x_z(
circuit: circuits.Circuit,
atol: float = 1e-8) -> None:
def synth(qubit: ops.Qid, matrix: np.ndarray) -> List[ops.Operation]:
out_gates = decompositions.single_qubit_matrix_to_phased_x_z(
matrix, atol)
return [gate(qubit) for gate in out_gates]
MergeSingleQubitGates(synthesizer=synth).optimize_circuit(circuit) | Canonicalizes runs of single-qubit rotations in a circuit.
Specifically, any run of non-parameterized circuits will be replaced by an
optional PhasedX operation followed by an optional Z operation.
Args:
circuit: The circuit to rewrite. This value is mutated in-place.
atol: Absolute tolerance to angle error. Larger values allow more
negligible gates to be dropped, smaller values increase accuracy. | juraj-google-style |
def _GetMetadataRequest(self, metadata_url, params=None, timeout=None):
headers = {'Metadata-Flavor': 'Google'}
params = urlparse.urlencode((params or {}))
url = ('%s?%s' % (metadata_url, params))
request = urlrequest.Request(url, headers=headers)
request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
timeout = (timeout or self.timeout)
return request_opener.open(request, timeout=(timeout * 1.1)) | Performs a GET request with the metadata headers.
Args:
metadata_url: string, the URL to perform a GET request on.
params: dictionary, the query parameters in the GET request.
timeout: int, timeout in seconds for metadata requests.
Returns:
HTTP response from the GET request.
Raises:
urlerror.HTTPError: raises when the GET request fails. | codesearchnet |
def __call__(self, utterances_batch: list, history_batch: list) -> list:
return [[True] * len(utterances_batch)] * self.size | Returns skills-utterances application matrix.
Generates skills-utterances application matrix with all True
elements.
Args:
utterances_batch: A batch of utterances of any type.
history_batch: Not used.
Returns:
response: Skills-utterances application matrix with all True
elements. | juraj-google-style |
def batch(self, timelimit=None):
from .launcher import BatchLauncher
prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:(- 1)])
prev_dir = os.path.join(os.path.sep, prev_dir)
workdir = os.path.join(prev_dir, (os.path.basename(self.workdir) + '_batch'))
return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit) | Run the flow in batch mode, return exit status of the job script.
Requires a manager.yml file and a batch_adapter adapter.
Args:
timelimit: Time limit (int with seconds or string with time given with the slurm convention:
"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the
`batch_adapter` entry of `manager.yml` is used. | codesearchnet |
def set_value(self, value: ScalarType) -> None:
if isinstance(value, bool):
value_str = 'true' if value else 'false'
else:
value_str = str(value)
start_mark = self.yaml_node.start_mark
end_mark = self.yaml_node.end_mark
tag = self.yaml_node.tag
if tag.startswith('tag:yaml.org,2002:'):
tag = scalar_type_to_tag[type(value)]
new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)
self.yaml_node = new_node | Sets the value of the node to a scalar value.
After this, is_scalar(type(value)) will return true.
Args:
value: The value to set this node to, a str, int, float, \
bool, or None. | juraj-google-style |
def query(self, query):
path = self.path(query.key)
if os.path.exists(path):
filenames = os.listdir(path)
filenames = list((set(filenames) - set(self.ignore_list)))
filenames = map((lambda f: os.path.join(path, f)), filenames)
iterable = self._read_object_gen(filenames)
else:
iterable = list()
return query(iterable) | Returns an iterable of objects matching criteria expressed in `query`
FSDatastore.query queries all the `.obj` files within the directory
specified by the query.key.
Args:
query: Query object describing the objects to return.
Raturns:
Cursor with all objects matching criteria | codesearchnet |
def parse(file_or_string):
from mysqlparse.grammar.sql_file import sql_file_syntax
if (hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__')):
return sql_file_syntax.parseString(file_or_string.read())
elif isinstance(file_or_string, six.string_types):
return sql_file_syntax.parseString(file_or_string)
else:
raise TypeError("Expected file-like or string object, but got '{type_name}' instead.".format(type_name=type(file_or_string).__name__)) | Parse a file-like object or string.
Args:
file_or_string (file, str): File-like object or string.
Returns:
ParseResults: instance of pyparsing parse results. | codesearchnet |
def testGradient(self, params, indices, expected_out, out_grad, expected_grad, params_ragged_rank=None):
if context.executing_eagerly():
return
params = ragged_factory_ops.constant(params, dtype=dtypes.float32, ragged_rank=params_ragged_rank)
indices = constant_op.constant(indices, dtype=dtypes.int32)
out_ragged_rank = params.ragged_rank + indices.shape.ndims - 1
out_grad = ragged_factory_ops.constant(out_grad, dtype=dtypes.float32, ragged_rank=out_ragged_rank)
expected_out = ragged_factory_ops.constant(expected_out, dtype=dtypes.float32, ragged_rank=out_ragged_rank)
expected_grad = ragged_factory_ops.constant(expected_grad, dtype=dtypes.float32, ragged_rank=params.ragged_rank)
out = ragged_gather_ops.gather(params, indices)
self.assertAllClose(out, expected_out)
grads = gradients_impl.gradients(out.flat_values, params.nested_row_splits + (params.flat_values, indices), out_grad.flat_values)
param_nested_splits_grads = grads[:-2]
params_flat_values_grad = grads[-2]
indices_grad = grads[-1]
self.assertEqual(indices_grad, None)
for splits_grad in param_nested_splits_grads:
self.assertEqual(splits_grad, None)
self.assertIsInstance(params_flat_values_grad, indexed_slices.IndexedSlices)
params_flat_values_grad = ops.convert_to_tensor(params_flat_values_grad)
params_grad = params.with_flat_values(params_flat_values_grad)
self.assertAllClose(params_grad, expected_grad, atol=2e-06, rtol=2e-06) | Tests that ragged_gather generates the right gradient.
Args:
params: The `params` that should be passed to `gather`.
indices: The `indices` that should be passed to `gather`.
expected_out: The expected value of `gather(params, indices)`.
`expected_out.shape = indices.shape + params.shape[1:]`.
out_grad: The value that should be fed in as the gradient for `out`
when testing the gradient of `ragged_gather`. Must have the same
shape as `expected_out`.
expected_grad: The expected gradient for that should be returned for
`params`. Must have hte same shape as `params`.
params_ragged_rank: The ragged_rank of `params`. | github-repos |
def put(self, resource):
endpoint = self.endpoint
if resource.id:
endpoint = self._build_url(endpoint, resource.id)
response = self.api.execute('PUT', endpoint, json=resource.as_dict())
if (not response.ok):
raise Error.parse(response.json())
return self._cls.parse(response.json()) | Edits an existing resource
Args:
resource - gophish.models.Model - The resource instance | codesearchnet |
def char(self, c: str) -> None:
if (self.peek() == c):
self.offset += 1
else:
raise UnexpectedInput(self, f"char '{c}'") | Parse the specified character.
Args:
c: One-character string.
Raises:
EndOfInput: If past the end of `self.input`.
UnexpectedInput: If the next character is different from `c`. | codesearchnet |
def SendTracebacks(self, request, context):
return debug_service_pb2.EventReply() | Base implementation of the handling of SendTracebacks calls.
The base implementation does nothing with the incoming request.
Override in an implementation of the server if necessary.
Args:
request: A `CallTraceback` proto, containing information about the
type (e.g., graph vs. eager execution) and source-code traceback of the
call and (any) associated `tf.Graph`s.
context: Server context.
Returns:
A `EventReply` proto. | github-repos |
def __init__(self, resource_handle, create_op, name):
stamp_token, serialized = gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)
slice_spec = ''
specs = [saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized')]
super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)
self.resource_handle = resource_handle
self._create_op = create_op | Creates a _TreeEnsembleSavable object.
Args:
resource_handle: handle to the decision tree ensemble variable.
create_op: the op to initialize the variable.
name: the name to save the tree ensemble variable under. | github-repos |
def create_linear(num_finite_buckets, width, offset):
if (num_finite_buckets <= 0):
raise ValueError(_BAD_NUM_FINITE_BUCKETS)
if (width <= 0.0):
raise ValueError((_BAD_FLOAT_ARG % (u'width', 0.0)))
return sc_messages.Distribution(bucketCounts=([0] * (num_finite_buckets + 2)), linearBuckets=sc_messages.LinearBuckets(numFiniteBuckets=num_finite_buckets, width=width, offset=offset)) | Creates a new instance of distribution with linear buckets.
Args:
num_finite_buckets (int): initializes number of finite buckets
width (float): initializes the width of each bucket
offset (float): initializes the offset
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance | codesearchnet |
def remove_metric(self, metric_name):
with self._lock:
metric = self._metrics.pop(metric_name, None)
if metric:
for reporter in self._reporters:
reporter.metric_removal(metric)
return metric | Remove a metric if it exists and return it. Return None otherwise.
If a metric is removed, `metric_removal` will be invoked
for each reporter.
Arguments:
metric_name (MetricName): The name of the metric
Returns:
KafkaMetric: the removed `KafkaMetric` or None if no such
metric exists | codesearchnet |
def show_help(bokehjs_action):
print()
if bokehjs_action in ['built', 'installed']:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build-js build and install a fresh BokehJS")
print(" --install-js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print("No extra Bokeh-specific options are available.")
print() | Print information about extra Bokeh-specific command line options.
Args:
bokehjs_action (str) : one of 'built', 'installed', or 'packaged'
how (or if) BokehJS was installed into the python source tree
Returns:
None | juraj-google-style |
async def get_active(self, *args, **kwargs):
coinids = kwargs.get('coinids')
uid = kwargs.get('uid', 0)
address = kwargs.get('address')
try:
coinid = coinid.replace('TEST', '')
except:
pass
try:
uid = int(uid)
except:
return (await self.error_400('User id must be integer. '))
if ((not uid) and address):
uid = (await self.get_uid_by_address(address=address, coinid=coinid))
if isinstance(uid, dict):
return uid
if (not all([coinids, uid])):
return (await self.error_400('Get active. Missed required fields.'))
if isinstance(coinids, list):
actives = {}
for coinid in coinids:
database = self.client[self.collection]
collection = database[coinid]
balance = (await collection.find_one({'uid': uid}))
if (not balance):
return (await self.error_404(('Get active. Balance with uid:%s and type:%s not found' % (uid, coinid))))
actives[coinid] = int(balance['amount_active'])
if isinstance(coinids, str):
actives = {}
for coinid in self.coinids:
database = self.client[coinid]
collection = database[self.collection]
balance = (await collection.find_one({'uid': uid}))
if (not balance):
return (await self.error_404(('Get active. Balance with uid:%s and type:%s not found' % (uid, coinid))))
actives[coinid] = int(balance['amount_active'])
return actives | Get active users balance
Accepts:
- uid [integer] (users id)
- types [list | string] (array with needed types or "all")
Returns:
{
type [string] (blockchain type): amount
} | codesearchnet |
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(config, request, global_params=global_params) | Gets information about a snapshot.
Args:
request: (DataflowProjectsLocationsSnapshotsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Snapshot) The response message. | github-repos |
def score_one(self, x: beam.Row) -> Optional[float]:
if len(x.__dict__) != 1:
raise ValueError('RobustZScore.score_one expected univariate input, but got %s', str(x))
v = next(iter(x))
if v is None or math.isnan(v):
return None
median = self._mad_tracker.get_median()
mad = self._mad_tracker.get()
if math.isnan(mad) or math.isnan(median):
return float('NaN')
if abs(mad) < EPSILON:
return 0.0
return abs(RobustZScore.SCALE_FACTOR * (v - median) / mad) | Scores a data point using the Robust Z-Score.
Args:
x: A `beam.Row` containing a single numerical value.
Returns:
float | None: The Robust Z-Score. | github-repos |
def find_file(search_dir, file_pattern):
for (root, dirnames, fnames) in os.walk(search_dir):
for fname in fnames:
if fnmatch.fnmatch(fname, file_pattern):
return os.path.join(root, fname)
return '' | Search for a file in a directory, and return the first match.
If the file is not found return an empty string
Args:
search_dir: The root directory to search in
file_pattern: A unix-style wildcard pattern representing
the file to find
Returns:
The path to the file if it was found, otherwise an empty string | codesearchnet |
def create_identical_dataset_and_algorithm_tuner(parent, additional_parents=None, sagemaker_session=None):
parent_tuner = HyperparameterTuner.attach(tuning_job_name=parent, sagemaker_session=sagemaker_session)
return parent_tuner.identical_dataset_and_algorithm_tuner(additional_parents=additional_parents) | Creates a new tuner by copying the request fields from the provided parent to the new instance of
``HyperparameterTuner`` followed by addition of warm start configuration with the type as
"IdenticalDataAndAlgorithm" and ``parents`` as the union of provided list of ``additional_parents`` and the
``parent``.
Args:
parent (str): Primary parent tuning job's name from which the Tuner and Estimator configuration has to be copied
additional_parents (set{str}): Set of additional parent tuning job's names along with the primary parent tuning
job name to be used in warm starting the transfer learning tuner.
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, one is created
using the default AWS configuration chain.
Returns:
sagemaker.tuner.HyperparameterTuner: a new ``HyperparameterTuner`` object for the warm-started
hyperparameter tuning job | codesearchnet |
def send_email(recipients, subject, message, attachments=None):
if not attachments:
attachments = []
if os.path.exists(EMAIL_SETTINGS_FILE):
email_settings = json.load(open(EMAIL_SETTINGS_FILE))
sender = email_settings.get('sender', 'ambry@localhost')
use_tls = email_settings.get('use_tls')
username = email_settings['username']
password = email_settings['password']
server = email_settings['server']
else:
server = 'localhost'
username = None
password = None
sender = 'ambry@localhost'
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = ','.join(recipients)
msg.attach(MIMEText(message, 'plain'))
for file_name in attachments:
if os.path.exists(file_name):
with open(file_name, 'r') as fp:
attachment = MIMEBase('application', 'text')
attachment.set_payload(fp.read())
attachment.add_header(
'Content-Disposition',
'attachment; filename="{}"'.format(os.path.basename(file_name)))
msg.attach(attachment)
srv = smtplib.SMTP(server)
if use_tls:
srv.starttls()
if username:
srv.login(username, password)
srv.sendmail(sender, ','.join(recipients), msg.as_string())
srv.quit() | Sends email.
Args:
recipients (list of str):
subject (str):
message (str):
attachments (list of str): list containing full paths (txt files only) to attach to email. | juraj-google-style |
def run(self, module, post_check):
try:
_cwd = os.getcwd()
_sys_path = list(sys.path)
_sys_argv = list(sys.argv)
sys.path.insert(0, os.path.dirname(self._path))
sys.argv = ([os.path.basename(self._path)] + self._argv)
exec(self._code, module.__dict__)
post_check()
except Exception as e:
self._failed = True
self._error_detail = traceback.format_exc()
(_exc_type, _exc_value, exc_traceback) = sys.exc_info()
(filename, line_number, func, txt) = traceback.extract_tb(exc_traceback)[(- 1)]
self._error = ('%s\nFile "%s", line %d, in %s:\n%s' % (str(e), os.path.basename(filename), line_number, func, txt))
finally:
os.chdir(_cwd)
sys.path = _sys_path
sys.argv = _sys_argv
self.ran = True | Execute the configured source code in a module and run any post
checks.
Args:
module (Module) : a module to execute the configured code in.
post_check(callable) : a function that can raise an exception
if expected post-conditions are not met after code execution. | codesearchnet |
def from_composition_and_pd(comp, pd, working_ion_symbol='Li'):
working_ion = Element(working_ion_symbol)
entry = None
working_ion_entry = None
for e in pd.stable_entries:
if (e.composition.reduced_formula == comp.reduced_formula):
entry = e
elif (e.is_element and (e.composition.reduced_formula == working_ion_symbol)):
working_ion_entry = e
if (not entry):
raise ValueError('Not stable compound found at composition {}.'.format(comp))
profile = pd.get_element_profile(working_ion, comp)
profile.reverse()
if (len(profile) < 2):
return None
working_ion_entry = working_ion_entry
working_ion = working_ion_entry.composition.elements[0].symbol
normalization_els = {}
for (el, amt) in comp.items():
if (el != Element(working_ion)):
normalization_els[el] = amt
vpairs = [ConversionVoltagePair.from_steps(profile[i], profile[(i + 1)], normalization_els) for i in range((len(profile) - 1))]
return ConversionElectrode(vpairs, working_ion_entry, comp) | Convenience constructor to make a ConversionElectrode from a
composition and a phase diagram.
Args:
comp:
Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
pd:
A PhaseDiagram of the relevant system (e.g., Li-Fe-F)
working_ion_symbol:
Element symbol of working ion. Defaults to Li. | codesearchnet |
def get_service_details(self, service_id: str) -> dict:
if not self._manager:
raise RuntimeError('Only the Swarm manager node can retrieve all'
' the services details.')
service = self._client.services.get(service_id)
return service.attrs | Get details of a service.
Only the manager nodes can retrieve service details
Args:
service_id (string): List of service id
Returns:
dict, details of the service | juraj-google-style |
def _get_upload_session_status(res):
response = json.loads(res.body.decode())
if 'sessionStatus' not in response:
try:
info = (
response['errorMessage']['additionalInfo']
['uploader_service.GoogleRupioAdditionalInfo']
['completionInfo']['customerSpecificInfo']
)
reason = '{} : {}'.format(info['status'], info['message'])
except KeyError:
reason = 'unknown reason'
raise exceptions.NetworkError('image upload failed: {}'.format(
reason
))
return response['sessionStatus'] | Parse the image upload response to obtain status.
Args:
res: http_utils.FetchResponse instance, the upload response
Returns:
dict, sessionStatus of the response
Raises:
hangups.NetworkError: If the upload request failed. | juraj-google-style |
def configure_plugin(self, name, options):
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
data = ['{0}={1}'.format(k, v) for (k, v) in six.iteritems(data)]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True | Configure a plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
options (dict): A key-value mapping of options
Returns:
``True`` if successful | codesearchnet |
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):
use_auth_token = kwargs.pop('use_auth_token', None)
if use_auth_token is not None:
warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
if kwargs.get('token', None) is not None:
raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
kwargs['token'] = use_auth_token
if os.path.isfile(save_directory):
raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop('commit_message', None)
repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])
repo_id = self._create_repo(repo_id, **kwargs)
files_timestamps = self._get_files_timestamps(save_directory)
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)
self.to_json_file(output_video_processor_file)
logger.info(f'Video processor saved in {output_video_processor_file}')
if push_to_hub:
self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))
return [output_video_processor_file] | Save an video processor object to the directory `save_directory`, so that it can be re-loaded using the
[`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the video processor JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`Dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. | github-repos |
def poke_native(getstate):
def poke(service, objname, obj, container, visited=None, _stack=None):
service.pokeNative(objname, getstate(obj), container)
return poke | Serializer factory for types which state can be natively serialized.
Arguments:
getstate (callable): takes an object and returns the object's state
to be passed to `pokeNative`.
Returns:
callable: serializer (`poke` routine). | juraj-google-style |
def memory_read64(self, addr, num_long_words):
buf_size = num_long_words
buf = (ctypes.c_ulonglong * buf_size)()
units_read = self._dll.JLINKARM_ReadMemU64(addr, buf_size, buf, 0)
if (units_read < 0):
raise errors.JLinkException(units_read)
return buf[:units_read] | Reads memory from the target system in units of 64-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_long_words (int): number of long words to read
Returns:
List of long words read from the target system.
Raises:
JLinkException: if memory could not be read | codesearchnet |
def _register_info(self, server):
server_url = urllib.parse.urlparse(server.get_url())
info = manager.TensorBoardInfo(
version=version.VERSION,
start_time=int(time.time()),
port=server_url.port,
pid=os.getpid(),
path_prefix=self.flags.path_prefix,
logdir=self.flags.logdir,
db=self.flags.db,
cache_key=self.cache_key,
)
atexit.register(manager.remove_info_file)
manager.write_info_file(info) | Write a TensorBoardInfo file and arrange for its cleanup.
Args:
server: The result of `self._make_server()`. | juraj-google-style |
def create(self, port, value, timestamp=None):
session = self._session
datapoint_class = self._datapoint_class
attributes = {'port': port, 'value': value}
if (timestamp is not None):
attributes['timestamp'] = to_iso_date(timestamp)
attributes = build_request_body('data-point', None, attributes=attributes)
def _process(json):
data = json.get('data')
return datapoint_class(data, session)
return session.post(self._base_url, CB.json(201, _process), json=attributes) | Post a new reading to a timeseries.
A reading is comprised of a `port`, a `value` and a timestamp.
A port is like a tag for the given reading and gives an
indication of the meaning of the value.
The value of the reading can be any valid json value.
The timestamp is considered the time the reading was taken, as
opposed to the `created` time of the data-point which
represents when the data-point was stored in the Helium
API. If the timestamp is not given the server will construct a
timestemp upon receiving the new reading.
Args:
port(string): The port to use for the new data-point
value: The value for the new data-point
Keyword Args:
timestamp(:class:`datetime`): An optional :class:`datetime` object | codesearchnet |
def add_line_if_absent(filename: str, line: str) -> None:
assert "\n" not in line
if not is_line_in_file(filename, line):
log.info("Appending line {!r} to file {!r}", line, filename)
with open(filename, "a") as file:
file.writelines([line]) | Adds a line (at the end) if it's not already in the file somewhere.
Args:
filename: filename to modify (in place)
line: line to append (which must not have a newline in) | juraj-google-style |
def update_query_parameters(url, query_parameters):
scheme, netloc, path, query_string, fragment = urlsplit(url)
url_params = parse_qs(query_string)
url_params.update(query_parameters)
return urlunsplit(
(scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment),
) | Return url with updated query parameters.
Arguments:
url (str): Original url whose query parameters need to be updated.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Returns:
(slug): slug identifier for the identity provider that can be used for identity verification of
users associated the enterprise customer of the given user. | juraj-google-style |
def _invalid_string_quote(self, quote, row, correct_quote=None, col=None):
if (not correct_quote):
correct_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)
self.add_message('invalid-string-quote', line=row, args=(quote, correct_quote), **self.get_offset(col)) | Add a message for an invalid string literal quote.
Args:
quote: The quote characters that were found.
row: The row number the quote character was found on.
correct_quote: The quote characters that is required. If None
(default), will use the one from the config.
col: The column the quote characters were found on. | codesearchnet |
def _to_camel_case(string):
components = string.split('_')
return '%s%s' % (
components[0],
''.join(c.title() for c in components[1:]),
) | Return a camel cased version of the input string.
Args:
string (str): A snake cased string.
Returns:
str: A camel cased string. | juraj-google-style |
def StartFlowAndWait(client_id,
token=None,
timeout=DEFAULT_TIMEOUT,
**flow_args):
flow_urn = flow.StartAFF4Flow(
client_id=client_id, token=token, sync=True, **flow_args)
WaitForFlow(flow_urn, token=token, timeout=timeout)
return flow_urn | Runs a flow and waits for it to finish.
Args:
client_id: The client id of the client to run on.
token: The datastore access token.
timeout: How long to wait for a flow to complete, maximum.
**flow_args: Pass through to flow.
Returns:
The urn of the flow that was run. | juraj-google-style |
def _save_metadata(self):
with open(self.paths.metadata(), 'w') as metadata_fd:
utils.json_dump(self.metadata, metadata_fd) | Write this prefix metadata to disk
Returns:
None | codesearchnet |
def input_on_stderr(prompt='', default=None, convert=None):
print(prompt, end='', file=sys.stderr)
value = builtins.input()
return _convert(value, default, convert) | Output a string to stderr and wait for input.
Args:
prompt (str): the message to display.
default: the default value to return if the user
leaves the field empty
convert (callable): a callable to be used to convert
the value the user inserted. If None, the type of
``default`` will be used. | juraj-google-style |
def _handle_request_error(self, orig_request, error, start_response):
headers = [('Content-Type', 'application/json')]
status_code = error.status_code()
body = error.rest_error()
response_status = ('%d %s' % (status_code, httplib.responses.get(status_code, 'Unknown Error')))
cors_handler = self._create_cors_handler(orig_request)
return util.send_wsgi_response(response_status, headers, body, start_response, cors_handler=cors_handler) | Handle a request error, converting it to a WSGI response.
Args:
orig_request: An ApiRequest, the original request from the user.
error: A RequestError containing information about the error.
start_response: A function with semantics defined in PEP-333.
Returns:
A string containing the response body. | codesearchnet |
def __new__(cls, strain_matrix):
vscale = np.ones((6,))
vscale[3:] *= 2
obj = super().__new__(cls, strain_matrix, vscale=vscale)
if not obj.is_symmetric():
raise ValueError("Strain objects must be initialized "
"with a symmetric array or a voigt-notation "
"vector with six entries.")
return obj.view(cls) | Create a Strain object. Note that the constructor uses __new__
rather than __init__ according to the standard method of
subclassing numpy ndarrays. Note also that the default constructor
does not include the deformation gradient
Args:
strain_matrix (3x3 array-like): the 3x3 array-like
representing the Green-Lagrange strain | juraj-google-style |
def _symmetric_projection(self, n):
q = self._orthogonal_matrix(n)
mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)
if self.seed:
self.seed += 1
c = math_ops.multiply(q, mask)
return math_ops.matmul(c, array_ops.matrix_transpose(c)) | Compute a n x n symmetric projection matrix.
Args:
n: Dimension.
Returns:
A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T. | github-repos |
def plot_lattice_vectors(lattice, ax=None, **kwargs):
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "g"
if "linewidth" not in kwargs:
kwargs["linewidth"] = 3
vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0])
vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
return fig, ax | Adds the basis vectors of the lattice provided to a matplotlib Axes
Args:
lattice: Lattice object
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to green
and linewidth to 3.
Returns:
matplotlib figure and matplotlib ax | juraj-google-style |
def convert_response(allocate_quota_response, project_id):
if ((not allocate_quota_response) or (not allocate_quota_response.allocateErrors)):
return _IS_OK
theError = allocate_quota_response.allocateErrors[0]
error_tuple = _QUOTA_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
if (error_tuple[1].find(u'{') == (- 1)):
return error_tuple
updated_msg = error_tuple[1].format(project_id=project_id, detail=(theError.description or u''))
return (error_tuple[0], updated_msg) | Computes a http status code and message `AllocateQuotaResponse`
The return value a tuple (code, message) where
code: is the http status code
message: is the message to return
Args:
allocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`):
the response from calling an api
Returns:
tuple(code, message) | codesearchnet |
def cache_url_data(self, url, data, attempt=0):
if attempt > 5:
raise ValueError('too many attempts at writing to the cache')
key = self.get_key_from_url(url)
if key == "info.rest":
return
current_date = datetime.strftime(self.today, "%Y-%m-%d")
if IS_PYTHON3:
data = data.encode("utf-8")
compressed = zlib.compress(data)
if IS_PYTHON2:
compressed = buffer(compressed)
t = (key, self.genome_build, current_date, self.api_version, compressed)
cmd = "INSERT OR REPLACE INTO ensembl " \
"(key, genome_build, cache_date, api_version, data) VALUES (?,?,?,?,?)"
try:
with self.conn as cursor:
cursor.execute(cmd, t)
except sqlite3.OperationalError:
time.sleep(random.uniform(1, 10))
self.cache_url_data(url, data.decode('utf-8'), attempt + 1) | cache the data retrieved from ensembl
Args:
url: URL for the Ensembl REST service
data: response data from Ensembl | juraj-google-style |
def add(self, other_op):
self._op.logEntries.extend(other_op.logEntries)
self._merge_timestamps(other_op)
self._merge_metric_values(other_op) | Combines `other_op` with the operation held by this aggregator.
N.B. It merges the operations log entries and metric values, but makes
the assumption the operation is consistent. It's the callers
responsibility to ensure consistency
Args:
other_op (
class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
an operation merge into this one | juraj-google-style |
def remove_snippet_client(self, name):
if name not in self._snippet_clients:
raise Error(self._device, MISSING_SNIPPET_CLIENT_MSG % name)
client = self._snippet_clients.pop(name)
client.stop_app() | Removes a snippet client from management.
Args:
name: string, the name of the snippet client to remove.
Raises:
Error: if no snippet client is managed under the specified name. | juraj-google-style |
def finalize_options(self):
self.cwd = os.path.abspath(os.path.dirname(__file__))
self.features_dir = os.path.join(self.cwd, 'tests', 'functional', 'features')
self.firmware_dirs = []
root = os.path.join(self.cwd, 'tests', 'functional', 'firmware')
for f in os.listdir(root):
fullpath = os.path.join(root, f)
if os.path.isdir(fullpath):
self.firmware_dirs.append(fullpath) | Populate the attributes.
Args:
self (BDDTestCommand): the ``BDDTestCommand`` instance
Returns:
``None`` | juraj-google-style |
def check_for_session(self, status=None):
status = Status.LAST if status is None else status
return os.path.isfile(self.get_restore_path(status)) and os.path.getsize(self.get_restore_path(status)) > 0 | check_for_session: see if session is in progress
Args:
status (str): step to check if last session reached (optional)
Returns: boolean indicating if session exists | juraj-google-style |
def update_user(self, user_obj):
LOG.info("Updating user %s", user_obj['_id'])
updated_user = self.user_collection.find_one_and_replace(
{'_id': user_obj['_id']},
user_obj,
return_document=pymongo.ReturnDocument.AFTER
)
return updated_user | Update an existing user.
Args:
user_obj(dict)
Returns:
updated_user(dict) | juraj-google-style |
def is_generator_function(obj):
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR)) | Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
true if the object is generator function. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.