code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def cluster_spec(self):
if self._override_client:
client = self._override_client
else:
from kubernetes import config as k8sconfig
from kubernetes import client as k8sclient
k8sconfig.load_kube_config()
client = k8sclient.CoreV1Api()
cluster_map = {}
for tf_job in ... | Returns a ClusterSpec object based on the latest info from Kubernetes.
We retrieve the information from the Kubernetes master every time this
method is called.
Returns:
A ClusterSpec containing host information returned from Kubernetes.
Raises:
RuntimeError: If any of the pods returned by the master is not in the
`R... | github-repos |
def check_configuration(ctx, base_key, needed_keys):
if (base_key not in ctx.keys()):
exit("[{}ERROR{}] missing configuration for '{}'".format(ERROR_COLOR, RESET_COLOR, base_key))
if (ctx.releaser is None):
        exit("[{}ERROR{}] empty configuration for '{}' found".format(ERROR_COLOR, RESET_COLOR, ba... | Confirm a valid configuration.
Args:
ctx (invoke.context):
base_key (str): the base configuration key everything is under.
needed_keys (list): sub-keys of the base key that are checked to make
sure they exist. | codesearchnet |
def execute_before(self, sensor_graph, scope_stack):
    """Execute statement before children are executed.

    Pushes a new configuration scope onto ``scope_stack`` so that child
    statements resolve ``current_slot`` to this statement's slot.

    Args:
        sensor_graph (SensorGraph): The sensor graph that we are building or
            modifying.
        scope_stack (list(Scope)): A stack of nested scopes that may influence
            how this statement allocates clocks or other stream resources.
    """
    enclosing = scope_stack[-1]
    config_scope = Scope("Configuration Scope", sensor_graph,
                         enclosing.allocator, enclosing)
    config_scope.add_identifier('current_slot', self.slot)
    scope_stack.append(config_scope)
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources. | juraj-google-style |
def format_counts(counts, header=None):
    """Format a single experiment result coming from backend to present
    to the Qiskit user.

    Args:
        counts (dict): counts histogram of multiple shots.
        header (dict): the experiment header dictionary containing
            useful information for postprocessing.

    Returns:
        dict: a formatted counts dictionary whose keys have been passed
        through ``format_counts_memory``.
    """
    # Later duplicate formatted keys overwrite earlier ones, matching the
    # insertion-order semantics of the original loop.
    return {format_counts_memory(key, header): val
            for key, val in counts.items()}
to the Qiskit user.
Args:
counts (dict): counts histogram of multiple shots
header (dict): the experiment header dictionary containing
useful information for postprocessing.
Returns:
dict: a formatted counts | codesearchnet |
def to_frame(self, *args):
    """Convert the cells in the view into a DataFrame object.

    If ``args`` is not given, this method returns a DataFrame that has an
    Index or a MultiIndex depending on the number of cells parameters and
    columns each of which corresponds to each cells included in the view.
    ``args`` can be given to calculate cells values for those arguments.
    """
    if sys.version_info >= (3, 6, 0):
        impls = get_impls(self)
    else:
        # Plain dicts do not preserve insertion order before Python 3.6,
        # so fall back to an explicitly ordered mapping.
        from collections import OrderedDict
        impls = OrderedDict((name, obj._impl) for name, obj in self.items())
    return _to_frame_inner(impls, args)
If ``args`` is not given, this method returns a DataFrame that
has an Index or a MultiIndex depending of the number of
cells parameters and columns each of which corresponds to each
cells included in the view.
``args`` can be given to calculate cells values and l... | codesearchnet |
def __init__(self, header, values):
assert isinstance(header, Header), \
'header must be a Ladybug Header object. Got {}'.format(type(header))
assert isinstance(header.analysis_period, AnalysisPeriod), \
'header of {} must have an analysis_period.'.format(self.__class__.... | Initialize hourly discontinuous collection.
Args:
header: A Ladybug Header object. Note that this header
must have an AnalysisPeriod on it that aligns with the
list of values.
values: A list of values. Note that the length of this list
must align with the AnalysisPeriod on the header. | juraj-google-style |
def prepare_soap_body(self, method, parameters, namespace):
tags = []
for name, value in parameters:
tag = "<{name}>{value}</{name}>".format(
name=name, value=escape("%s" % value, {'"': """}))
tags.append(tag)
wrap... | Prepare the SOAP message body for sending.
Args:
method (str): The name of the method to call.
parameters (list): A list of (name, value) tuples containing
the parameters to pass to the method.
namespace (str): The XML namespace to use for the method.
Returns:
str: A properly formatted SOAP Body. | juraj-google-style |
def __init__(self, group_type, name, **kwargs):
self._utils = TcExUtils()
self._name = name
self._type = group_type
self._group_data = {'name': name, 'type': group_type}
for arg, value in kwargs.items():
self.add_key_value(arg, value)
... | Initialize Class Properties.
Args:
group_type (str): The ThreatConnect define Group type.
name (str): The name for this Group.
xid (str, kwargs): The external id for this Group. | juraj-google-style |
def _skip_parameter_matching(self) -> bool:
if self.signature.type_params:
return False
if self.ctx.options.analyze_annotated:
return False
return self.signature.has_return_annotation or self.full_name == '__init__' | Check whether we should skip parameter matching.
This is use to skip parameter matching for function calls in the context of
inference (pyi generation). This is to optimize the case where we don't
need to match parameters in cases which the function has explicit type
annotations, meaning that we don't need to infer th... | github-repos |
def load_config(logdir):
config_path = logdir and os.path.join(logdir, 'config.yaml')
if not config_path or not tf.gfile.Exists(config_path):
message = (
'Cannot resume an existing run since the logging directory does not '
'contain a configuration file.')
raise IOError(message)
with... | Load a configuration from the log directory.
Args:
logdir: The logging directory containing the configuration file.
Raises:
IOError: The logging directory does not contain a configuration file.
Returns:
Configuration object. | juraj-google-style |
def obtain_capture_by_value_ops(dataset):
    """Given an input dataset, finds all allowlisted ops used for construction.

    Allowlisted ops are stateful ops which are known to be safe to capture by
    value.

    Args:
        dataset: Dataset to find allowlisted stateful ops for.

    Returns:
        A list of variant_tensor producing dataset ops used to construct this
        dataset.
    """
    def is_capturable_by_value(op):
        # An op qualifies either by the dtype of its first output or by its
        # op type being explicitly allowlisted.
        return (op.outputs[0].dtype in TENSOR_TYPES_ALLOWLIST
                or op.type in OP_TYPES_ALLOWLIST)

    return _traverse(dataset, is_capturable_by_value)
Allowlisted ops are stateful ops which are known to be safe to capture by
value.
Args:
dataset: Dataset to find allowlisted stateful ops for.
Returns:
A list of variant_tensor producing dataset ops used to construct this
dataset. | github-repos |
def get_custom_objects():
    """Retrieves a live reference to the global dictionary of custom objects.

    Custom objects set using `custom_object_scope()` are not added to the
    global dictionary of custom objects, and will not appear in the returned
    dictionary.

    Returns:
        The module-level ``GLOBAL_CUSTOM_OBJECTS`` dict. This is a live
        reference, so mutating it updates the registry directly.
    """
    return GLOBAL_CUSTOM_OBJECTS
Custom objects set using `custom_object_scope()` are not added to the
global dictionary of custom objects, and will not appear in the returned
dictionary.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject... | github-repos |
def _assign_method(self, resource_class, method_type):
method_name = resource_class.get_method_name(
resource_class, method_type)
valid_status_codes = getattr(
resource_class.Meta,
'valid_status_codes',
DEFAULT_VALID_STATUS_CODES
... | Using reflection, assigns a new method to this class.
Args:
resource_class: A resource class
method_type: The HTTP method type | juraj-google-style |
def sendline(self, text):
logger.debug("Sending input '{0}' to '{1}'".format(text, self.name))
try:
return self._spawn.sendline(text)
except pexpect.exceptions.EOF as e:
logger.debug('Raising termination exception.')
raise TerminationException(instance=self, real_exception=e, output=... | Sends an input line to the running program, including os.linesep.
Args:
text (str): The input text to be send.
Raises:
TerminationException: The program terminated before / while / after sending the input.
NestedException: An internal problem occured while waiting for the output. | codesearchnet |
def ModulePath(module_name):
module = importlib.import_module(module_name)
path = inspect.getfile(module)
if compatibility.PY2:
path = path.decode('utf-8')
if os.path.basename(path).startswith('__init__.'):
path = os.path.dirname(path)
if path.endswith('.pyc'):
path = (path[:... | Computes a path to the specified module.
Args:
module_name: A name of the module to get the path for.
Returns:
A path to the specified module.
Raises:
ImportError: If specified module cannot be imported. | codesearchnet |
def next(self):
options = {}
if self._buffer_size:
options['read_buffer_size'] = self._buffer_size
if self._account_id:
options['_account_id'] = self._account_id
while True:
filename = self._next_file()
if (filename is None):
raise StopIteration()
if (... | Returns a handler to the next file.
Non existent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may b... | codesearchnet |
def float_value_convert(dictin, dropfailedvalues=False):
    """Convert values of dictionary to floats.

    Thin wrapper around ``key_value_convert`` with ``valuefn=float``.

    Args:
        dictin (DictUpperBound): Input dictionary.
        dropfailedvalues (bool): Whether to drop dictionary entries where
            conversion fails. Defaults to False.

    Returns:
        Dict: Dictionary with values converted to floats.
    """
    return key_value_convert(dictin, valuefn=float, dropfailedvalues=dropfailedvalues)
Args:
dictin (DictUpperBound): Input dictionary
dropfailedvalues (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False.
Returns:
Dict: Dictionary with values converted to floats | juraj-google-style |
def git_ls_remote(self, uri, ref):
logger.debug("Invoking git to retrieve commit id for repo %s...", uri)
lsremote_output = subprocess.check_output(['git',
'ls-remote',
uri,
... | Determine the latest commit id for a given ref.
Args:
uri (string): git URI
ref (string): git ref
Returns:
str: A commit id | juraj-google-style |
def tell(self):
    """Tell the file's current offset.

    Returns:
        Current offset in reading this file.

    Raises:
        ValueError: When this stream is closed (raised by ``_check_open``).
    """
    self._check_open()
    return self.position
Returns:
current offset in reading this file.
Raises:
``ValueError``: When this stream is closed. | github-repos |
def read_stream(self, start_offset=0, byte_count=None):
    """Reads the content of this object as text.

    Args:
        start_offset: the start offset of bytes to read.
        byte_count: the number of bytes to read. If None, it reads to the end.

    Returns:
        The text content within the object.

    Raises:
        Exception if there was an error requesting the object's content.
    """
    # The previous try/except caught Exception only to re-raise it unchanged,
    # which added no value; errors propagate to the caller exactly as before.
    return self._api.object_download(self._bucket, self._key,
                                     start_offset=start_offset,
                                     byte_count=byte_count)
raise e | Reads the content of this object as text.
Args:
start_offset: the start offset of bytes to read.
byte_count: the number of bytes to read. If None, it reads to the end.
Returns:
The text content within the object.
Raises:
Exception if there was an error requesting the object's content. | juraj-google-style |
def v_cross(u, v):
'\n i = u[1]*v[2] - u[2]*v[1]\n j = u[2]*v[0] - u[0]*v[2]\n k = u[0]*v[1] - u[1]*v[0]\n '
i = '(({u1})*({v2}) - ({u2})*({v1}))'.format(u1=u[1], u2=u[2], v1=v[1], v2=v[2])
j = '(({u2})*({v0}) - ({u0})*({v2}))'.format(u0=u[0], u2=u[2], v0=v[0], v2=v[2])
k = '(({u0})*({v1}) -... | muparser cross product function
Compute the cross product of two 3x1 vectors
Args:
u (list or tuple of 3 strings): first vector
v (list or tuple of 3 strings): second vector
Returns:
A list containing a muparser string of the cross product | codesearchnet |
def get_tensor_size(self, tensor_name, partial_layout=None,
                    mesh_dimension_to_size=None):
    """The size of a tensor in bytes.

    If partial_layout is specified, then mesh_dimension_to_size must also be.
    In this case, the size on a single device is returned.

    Args:
        tensor_name: a string, name of a tensor in the graph.
        partial_layout: an optional {string: string}, from MTF dimension name
            to mesh dimension name.
        mesh_dimension_to_size: an optional {string: int}, from mesh dimension
            name to size.

    Returns:
        The tensor size in bytes.
    """
    bytes_per_entry = self.get_tensor_dtype(tensor_name).size
    num_entries = self.get_tensor_num_entries(tensor_name, partial_layout,
                                              mesh_dimension_to_size)
    return bytes_per_entry * num_entries
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the size on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh... | juraj-google-style |
def calculate_cidr(start_address, end_address):
tmp_addrs = []
try:
tmp_addrs.extend(summarize_address_range(
ip_address(start_address),
ip_address(end_address)))
except (KeyError, ValueError, TypeError):
try:
tmp_addrs.extend(summarize_addres... | The function to calculate a CIDR range(s) from a start and end IP address.
Args:
start_address (:obj:`str`): The starting IP address.
end_address (:obj:`str`): The ending IP address.
Returns:
list of str: The calculated CIDR ranges. | juraj-google-style |
def _deserialize_audience(audience_map):
    """Helper method to de-serialize and populate audience map with the
    condition list and structure.

    Args:
        audience_map: Dict mapping audience ID to audience object.

    Returns:
        Dict additionally consisting of condition list and structure on every
        audience object (the input map, mutated in place).
    """
    for audience in audience_map.values():
        structure, conditions = condition_helper.loads(audience.conditions)
        audience.__dict__['conditionStructure'] = structure
        audience.__dict__['conditionList'] = conditions
    return audience_map
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object. | codesearchnet |
def get_default_backend_config(appdirs):
return {'store': 'sqlalchemy', 'day_start': datetime.time(5, 30, 0), 'fact_min_delta': 1, 'tmpfile_path': os.path.join(appdirs.user_data_dir, '{}.tmp'.format(appdirs.appname)), 'db_engine': 'sqlite', 'db_path': os.path.join(appdirs.user_data_dir, '{}.sqlite'.format(appdirs.a... | Return a default config dictionary.
Args:
appdirs (HamsterAppDirs): ``HamsterAppDirs`` instance encapsulating the apps details.
Returns:
dict: Dictionary with a default configuration.
Note:
Those defaults are independent of the particular config-store. | codesearchnet |
def __init__(self, curriculum_obj, batch_size, max_len, ops, token_by_char):
self._vocab_dict = collections.defaultdict(lambda: 0)
self._vocab_dict[self.UNK] = 0
self._inv_vocab_dict = collections.defaultdict(lambda: self.UNK)
self.curriculum_obj = curriculum_obj
self._max_seq_length = ma... | Creates a TokenDataSource instance.
Args:
curriculum_obj: (LTECurriculum) determines sample complexity.
batch_size: (int) Batch size to generate.
max_len: (int) This is the maximum size of any given sample sequence.
ops: (list(CodeOp)). Task operations that inherit from CodeOp().
token_by_char: (bool) Whether to token... | juraj-google-style |
def get_note(self, noteid, version=None):
params_version = ""
if version is not None:
params_version = '/v/' + str(version)
params = '/i/%s%s' % (str(noteid), params_version)
request = Request(DATA_URL+params)
request.add_header(self.header, self.ge... | Method to get a specific note
Arguments:
- noteid (string): ID of the note to get
- version (int): optional version of the note to get
Returns:
A tuple `(note, status)`
- note (dict): note object
- status (int): 0 on success and -1 otherwise | juraj-google-style |
def update(self):
(data_format, data) = RuuviTagSensor.get_data(self._mac, self._bt_device)
if (data == self._data):
return self._state
self._data = data
if (self._data is None):
self._state = {}
else:
self._state = get_decoder(data_format).decode_data(self._data)
    return ... | Get latest data from the sensor and update own state.
Returns:
dict: Latest state | codesearchnet |
def run(self, dag):
cx_runs = dag.collect_runs(["cx"])
for cx_run in cx_runs:
partition = []
chunk = []
for i in range(len(cx_run) - 1):
chunk.append(cx_run[i])
qargs0 = cx_run[i].qargs
qargs1 = cx... | Run one pass of cx cancellation on the circuit
Args:
dag (DAGCircuit): the directed acyclic graph to run on.
Returns:
DAGCircuit: Transformed DAG. | juraj-google-style |
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND).

    Args:
        x: Tensor or variable.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    Returns:
        A uint8 tensor (0s and 1s).
    """
    as_bool = math_ops.cast(x, dtypes_module.bool)
    return math_ops.reduce_all(as_bool, axis, keepdims)
Args:
x: Tensor or variable.
axis: axis along which to perform the reduction.
keepdims: whether to drop or broadcast the reduction axes.
Returns:
A uint8 tensor (0s and 1s). | github-repos |
def _get_or_create_arg_by_name(state, name, is_kwarg=False):
for arg in state.args + state.kwargs:
if arg.name == name:
return arg
arg = Namespace()
arg.name = name
arg.type.lines = []
arg.description.lines = []
if is_kwarg:
state.kwargs.append(arg)
else:
... | Gets or creates a new Arg.
These Arg objects (Namespaces) are turned into the ArgInfo namedtuples
returned by parse. Each Arg object is used to collect the name, type, and
description of a single argument to the docstring's function.
Args:
state: The state of the parser.
name: The name of the arg to create.
is_kwarg:... | github-repos |
def _contiguous_groups(
length: int,
comparator: Callable[[int, int], bool]
) -> List[Tuple[int, int]]:
result = []
start = 0
while start < length:
past = start + 1
while past < length and comparator(start, past):
past += 1
result.append((start, past)... | Splits range(length) into approximate equivalence classes.
Args:
length: The length of the range to split.
comparator: Determines if two indices have approximately equal items.
Returns:
A list of (inclusive_start, exclusive_end) range endpoints. Each
corresponds to a run of approximately-equivalent items. | juraj-google-style |
def delete_public_ip(access_token, subscription_id, resource_group, public_ip_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/publicIPAddre... | Delete a public ip addresses associated with a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
public_ip_name (str): Name of the public ip address resource.
Returns:
HTTP response. | juraj-google-style |
def _CheckParserCanProcessFileEntry(self, parser, file_entry):
for filter_object in parser.FILTERS:
if filter_object.Match(file_entry):
return True
return False | Determines if a parser can process a file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
parser (BaseParser): parser.
Returns:
bool: True if the file entry can be processed by the parser object. | codesearchnet |
def _pretty_print(key_val, sep=': ', min_col_width=39, text_width=None):
if text_width is None:
text_width = get_terminal_size().columns
if text_width < min_col_width:
min_col_width = text_width
ncols = (text_width + 1)
colw = (text_width + 1)
ncols = min(ncols, len(key_val))
... | Print a iterable of key/values
Args:
key_val (list of (str, str)): the pairs of section names and text.
sep (str): separator between section names and text.
min_col_width (int): minimal acceptable column width
text_width (int): text width to use. If set to None, will try to infer
the size of the terminal. | juraj-google-style |
def _ExpectedKeysForEntry(self, entry):
return [entry.name] | Generate a list of expected cache keys for this type of map.
Args:
entry: A GroupMapEntry
Returns:
A list of strings | github-repos |
def forward(self, seq_length=None, position=None):
if position is None and seq_length is None:
raise ValueError('Either position or seq_length must be provided')
if position is None:
position = torch.arange(seq_length, dtype=torch.float32, device=self.inv_timescales.device).unsqueeze(0)
elif... | Generates a Tensor of sinusoids with different frequencies.
Args:
seq_length: an optional Python int defining the output sequence length.
if the `position` argument is specified.
position: [B, seq_length], optional position for each token in the
sequence, only required when the sequence is packed.
Returns:
[B, seqlen... | github-repos |
def _HasTable(self, table_name):
query = self._HAS_TABLE_QUERY.format(table_name)
self._cursor.execute(query)
return bool(self._cursor.fetchone()) | Determines if a specific table exists.
Args:
table_name (str): name of the table.
Returns:
bool: True if the table exists, false otherwise. | juraj-google-style |
def handle_simple_responses(
    self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
    """Accepts normal responses from the device.

    Args:
      timeout_ms: Timeout in milliseconds to wait for each response.
      info_cb: Optional callback for text sent from the bootloader.

    Returns:
      OKAY packet's message.
    """
    return self._accept_responses('OKAY', info_cb, timeout_ms=timeout_ms)
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message. | juraj-google-style |
def get_spd_dos(self):
spd_dos = {}
for atom_dos in self.pdos.values():
for (orb, pdos) in atom_dos.items():
orbital_type = _get_orb_type(orb)
if (orbital_type not in spd_dos):
spd_dos[orbital_type] = pdos
else:
spd_dos[orbital_type] = ... | Get orbital projected Dos.
Returns:
dict of {orbital: Dos}, e.g. {"s": Dos object, ...} | codesearchnet |
def bbox_transpose(bbox, axis, rows, cols):
x_min, y_min, x_max, y_max = bbox
if axis != 0 and axis != 1:
raise ValueError('Axis must be either 0 or 1.')
if axis == 0:
bbox = [y_min, x_min, y_max, x_max]
if axis == 1:
bbox = [1 - y_max, 1 - x_max, 1 - y_min, 1 - x_min]
r... | Transposes a bounding box along given axis.
Args:
bbox (tuple): A tuple (x_min, y_min, x_max, y_max).
axis (int): 0 - main axis, 1 - secondary axis.
rows (int): Image rows.
cols (int): Image cols. | juraj-google-style |
def cudnn_compatible_gru(units, n_hidden, n_layers=1, trainable_initial_states=False, seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False):
with tf.variable_scope(name, reuse=reuse):
if trainable_initial_states:
init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])
... | CuDNN Compatible GRU implementation.
It should be used to load models saved with CudnnGRUCell to run on CPU.
Args:
units: tf.Tensor with dimensions [B x T x F], where
B - batch size
T - number of tokens
F - features
n_hidden: dimensionality of hidden state
trainable_initial_states: whether to create a special trainab... | codesearchnet |
def _get(self, feed_item):
return self._api().get(profileId=self.profile_id, id=str(feed_item[self._id_field])).execute() | Fetches an item from CM.
Args:
feed_item: Feed item from Bulkdozer feed representing the item to fetch
from CM. | github-repos |
def _build_mask_ds(mask, mask_offset):
    """Build the mask dataset to indicate which element to skip.

    Args:
        mask: `tf.Tensor`, binary mask to apply to all following elements.
            This mask should have a length 100.
        mask_offset: `tf.Tensor`, Integer specifying from how much the mask
            should be shifted for the first element.

    Returns:
        mask_ds: `tf.data.Dataset`, the mask repeated indefinitely with the
            first `mask_offset` entries skipped.
    """
    return (tf.data.Dataset.from_tensor_slices(mask)
            .repeat()
            .skip(mask_offset))
Args:
mask: `tf.Tensor`, binary mask to apply to all following elements. This
mask should have a length 100.
mask_offset: `tf.Tensor`, Integer specifying from how much the mask
should be shifted for the first element.
Returns:
mask_ds: `tf.data.Dataset`, a dat... | juraj-google-style |
def decode_message(self, message_type, encoded_message):
encoded_message = six.ensure_str(encoded_message)
if (not encoded_message.strip()):
return message_type()
dictionary = json.loads(encoded_message)
message = self.__decode_dictionary(message_type, dictionary)
message.check_initialized()... | Merge JSON structure to Message instance.
Args:
message_type: Message to decode data to.
encoded_message: JSON encoded version of message.
Returns:
Decoded instance of message_type.
Raises:
ValueError: If encoded_message is not valid JSON.
messages.ValidationError if merged message is not initialized. | codesearchnet |
def get_version():
if (not INSTALLED):
try:
with open('version.txt', 'r') as v_fh:
return v_fh.read()
except Exception:
warnings.warn('Unable to resolve package version until installed', UserWarning)
return '0.0.0'
return p_version.get_version(... | find current version information
Returns:
(str): version information | codesearchnet |
def _assert_obj_type(pub, name='pub', obj_type=DBPublication):
    """Make sure, that `pub` is instance of the `obj_type`.

    Args:
        pub (obj): Instance which will be checked.
        name (str): Name of the instance. Used in exception. Default `pub`.
        obj_type (class): Class of which the `pub` should be instance.
            Default :class:`.DBPublication`.

    Raises:
        InvalidType: When the `pub` is not instance of `obj_type`.
    """
    if isinstance(pub, obj_type):
        return
    raise InvalidType(
        ('`%s` have to be instance of %s, not %s!' % (name, obj_type.__name__, pub.__class__.__name__)))
Args:
pub (obj): Instance which will be checked.
name (str): Name of the instance. Used in exception. Default `pub`.
obj_type (class): Class of which the `pub` should be instance. Default
:class:`.DBPublication`.
Raises:
InvalidType: When the `pub` is not instance ... | codesearchnet |
def GetUserinfo(credentials, http=None):
http = http or httplib2.Http()
url = _GetUserinfoUrl(credentials)
response, content = http.request(url)
if response.status == http_client.BAD_REQUEST:
credentials.refresh(http)
url = _GetUserinfoUrl(credentials)
response, ... | Get the userinfo associated with the given credentials.
This is dependent on the token having either the userinfo.email or
userinfo.profile scope for the given token.
Args:
credentials: (oauth2client.client.Credentials) incoming credentials
http: (httplib2.Http, optional) http instance to use
Returns:
The email addr... | juraj-google-style |
def AddRoute(self, short_name, long_name, route_type, route_id=None):
if (route_id is None):
route_id = util.FindUniqueId(self.routes)
route = self._gtfs_factory.Route(short_name=short_name, long_name=long_name, route_type=route_type, route_id=route_id)
route.agency_id = self.GetDefaultAgency().agen... | Add a route to this schedule.
Args:
short_name: Short name of the route, such as "71L"
long_name: Full name of the route, such as "NW 21st Ave/St Helens Rd"
route_type: A type such as "Tram", "Subway" or "Bus"
route_id: id of the route or None, in which case a unique id is picked
Returns:
A new Route object | codesearchnet |
def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]:
jsonl_flag, json_flag, yaml_flag = False, False, False
if fn == "-" or "jsonl" in fn:
jsonl_flag = True
elif "json" in fn:
json_flag = True
elif re.search("ya?ml", fn):
yaml_flag = True
else:
log.error("... | Read file and generate nanopubs
If filename has *.gz, will read as a gzip file
If filename has *.jsonl*, will be parsed as a JSONLines file
If filename has *.json*, will be parsed as a JSON file
If filename has *.yaml* or *.yml*, will be parsed as a YAML file
Args:
filename (str): filename to read nanopubs from
Return... | juraj-google-style |
def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):
trial_hp_overrides = scoped_overrides.values()
loop_hp = create_loop_hparams()
model_hp_name = trial_hp_overrides.get('loop.generative_model_params', loop_hp.generative_model_params)
model_hp = registry.hparams(model_hp_name).p... | Create HParams suitable for training loop from scoped HParams.
Args:
scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
parameters are overrides for the base HParams created by
create_loop_hparams.
trial_id: str, trial identifier. This is used to register unique HParams
names for the underlying... | codesearchnet |
def __deepcopy__(self, memo):
with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):
new_values = []
for value in self._values:
with ops.device(value.device):
new_values.append(copy.deepcopy(value, memo))
copied_variable = type(self)(strategy=self._d... | Perform a deepcopy of the `DistributedVariable`.
Unlike the deepcopy of a regular tf.Variable, this keeps the original
strategy and devices of the `DistributedVariable`. To avoid confusion
with the behavior of deepcopy on a regular `Variable` (which does
copy into new devices), we only allow a deepcopy of a `Distribu... | github-repos |
def create_room(self, alias=None, is_public=False, invitees=None):
    """Create a new room on the homeserver.

    Args:
        alias (str): The canonical_alias of the room.
        is_public (bool): The public/private visibility of the room.
        invitees (str[]): A set of user ids to invite into the room.

    Returns:
        Room

    Raises:
        MatrixRequestError
    """
    api_response = self.api.create_room(alias=alias,
                                        is_public=is_public,
                                        invitees=invitees)
    return self._mkroom(api_response["room_id"])
Args:
alias (str): The canonical_alias of the room.
is_public (bool): The public/private visibility of the room.
invitees (str[]): A set of user ids to invite into the room.
Returns:
Room
Raises:
MatrixRequestError | juraj-google-style |
def load_feature_lists(self, feature_lists):
column_names = []
feature_ranges = []
running_feature_count = 0
for list_id in feature_lists:
feature_list_names = load_lines(self.features_dir + 'X_train_{}.names'.format(list_id))
column_names.extend(featur... | Load pickled features for train and test sets, assuming they are saved
in the `features` folder along with their column names.
Args:
feature_lists: A list containing the names of the feature lists to load.
Returns:
A tuple containing 3 items: train dataframe, test dataframe,
and a list describing the index ranges for... | juraj-google-style |
def foreach_loop(self, context):
logger.debug("starting")
foreach = context.get_formatted_iterable(self.foreach_items)
foreach_length = len(foreach)
logger.info(f"foreach decorator will loop {foreach_length} times.")
for i in foreach:
lo... | Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | juraj-google-style |
def tryload(self, cfgstr=None, on_error='raise'):
cfgstr = self._rectify_cfgstr(cfgstr)
if self.enabled:
try:
if self.verbose > 1:
self.log('[cacher] tryload fname={}'.format(self.fname))
return self.load(cfgstr)
except... | Like load, but returns None if the load fails due to a cache miss.
Args:
on_error (str): How to handle non-io errors errors. Either raise,
which re-raises the exception, or clear which deletes the cache
and returns None. | juraj-google-style |
def GetPasswdMap(self, since=None):
    """Return the passwd map from this source.

    Args:
      since: Get data only changed since this timestamp (inclusive) or None
        for all data.

    Returns:
      instance of maps.PasswdMap
    """
    getter = PasswdUpdateGetter(self.conf)
    return getter.GetUpdates(source=self,
                             search_base=self.conf['base'],
                             search_filter=self.conf['filter'],
                             search_scope=self.conf['scope'],
                             since=since)
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of maps.PasswdMap | github-repos |
def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2):
    """Returns degeneracy of a given k-point based on structure symmetry

    Args:
        kpoint (1x3 array): coordinate of the k-point
        cartesian (bool): kpoint is in cartesian or fractional coordinates
        tol (float): tolerance below which coordinates are considered equal

    Returns:
        (int or None): degeneracy or None if structure is not available
    """
    equivalent_kpoints = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol)
    # Falls through to an implicit None when no symmetry info is available.
    if equivalent_kpoints is not None:
        return len(equivalent_kpoints)
Args:
kpoint (1x3 array): coordinate of the k-point
cartesian (bool): kpoint is in cartesian or fractional coordinates
tol (float): tolerance below which coordinates are considered equal
Returns:
(int or None): degeneracy or None if structure is not ava... | juraj-google-style |
def weights(self):
    """Returns the list of all layer variables/weights.

    Note: This will not track the weights of nested `tf.Modules` that are not
    themselves Keras layers.

    Returns:
      A list of variables (deduplicated via ``_dedup_weights``).
    """
    return self._dedup_weights(self._undeduplicated_weights)
Note: This will not track the weights of nested `tf.Modules` that are not
themselves Keras layers.
Returns:
A list of variables. | github-repos |
def get_features_for_wav(self, wav_filename, model_settings, sess):
desired_samples = model_settings['desired_samples']
input_dict = {self.wav_filename_placeholder_: wav_filename, self.time_shift_padding_placeholder_: [[0, 0], [0, 0]], self.time_shift_offset_placeholder_: [0, 0], self.background_data_placeholde... | Applies the feature transformation process to the input_wav.
Runs the feature generation process (generally producing a spectrogram from
the input samples) on the WAV file. This can be useful for testing and
verifying implementations being run on other platforms.
Args:
wav_filename: The path to the input audio file.
... | github-repos |
def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> types.FloatTensor:
name = name or self._name + '_price'
with tf.name_scope(name):
discount_curve = cashflow_streams.get_discount_curve(self._discount_curve_type, market, self._mask)
reference_curve = cashflow_streams.g... | Returns the present value of the stream on the valuation date.
Args:
market: An instance of `ProcessedMarketData`.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'price'.
Returns:
A `Tensor` of shape `batch_shape` containing the modeled price of each
FRA c... | github-repos |
def _calc_dir_size(path):
dir_size = 0
for (root, dirs, files) in os.walk(path):
for fn in files:
full_fn = os.path.join(root, fn)
dir_size += os.path.getsize(full_fn)
return dir_size | Calculate size of all files in `path`.
Args:
path (str): Path to the directory.
Returns:
int: Size of the directory in bytes. | codesearchnet |
def delete_s3_bucket(client, resource):
    """Delete an S3 bucket

    This function will try to delete an S3 bucket

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 client object
        resource (:obj:`Resource`): The resource object to terminate

    Returns:
        `ActionStatus`
    """
    # Deletion is gated behind a feature flag that defaults to off; the
    # action is reported as succeeded either way, matching prior behavior.
    if dbconfig.get('enable_delete_s3_buckets', NS_AUDITOR_REQUIRED_TAGS, False):
        client.delete_bucket(Bucket=resource.id)
    return ActionStatus.SUCCEED, resource.metrics()
This function will try to delete an S3 bucket
Args:
client (:obj:`boto3.session.Session.client`): A boto3 client object
resource (:obj:`Resource`): The resource object to terminate
Returns:
`ActionStatus` | codesearchnet |
def _load_stdlib_versions(self):
lines = self._store.load_stdlib_versions()
versions = {}
for line in lines:
line2 = line.split('
if not line2:
continue
match = re.fullmatch('(.+): (\\d)\\.(\\d+)(?:-(?:(\\d)\\.(\\d+))?)?', line2)
assert match
module, min_m... | Loads the contents of typeshed/stdlib/VERSIONS.
VERSIONS lists the stdlib modules with the Python version in which they were
first added, in the format `{module}: {min_major}.{min_minor}-` or
`{module}: {min_major}.{min_minor}-{max_major}.{max_minor}`.
Returns:
A mapping from module name to version range in the forma... | github-repos |
def fetch_task_to_run(self):
    """Return the first task that is ready to run, or None if no task
    can be submitted at present.

    Raises:
        StopIteration: if all tasks are completed.
    """
    if all(task.is_completed for task in self):
        raise StopIteration('All tasks completed.')
    runnable = next((task for task in self if task.can_run), None)
    if runnable is None:
        # Nothing completed, nothing runnable: likely a dependency cycle.
        logger.warning('Possible deadlock in fetch_task_to_run!')
    return runnable
return None | Returns the first task that is ready to run or
None if no task can be submitted at present"
Raises:
`StopIteration` if all tasks are done. | codesearchnet |
def user_is_sponsor(self, user):
    """Return whether the given user is a sponsor of the activity.

    Args:
        user: The user to look for among the activity's true sponsors.

    Returns:
        bool: True if any true sponsor's user equals ``user``.
    """
    # any() short-circuits on the first match, same as the manual loop did.
    return any(sponsor.user == user for sponsor in self.get_true_sponsors())
return False | Return whether the given user is a sponsor of the activity.
Returns:
Boolean | codesearchnet |
def _readable_flags(transport):
if ('flags' not in transport):
return None
_flag_list = []
flags = transport['flags']
if (flags & dpkt.tcp.TH_SYN):
if (flags & dpkt.tcp.TH_ACK):
_flag_list.append('syn_ack')
else:
_flag_list.append('syn')
elif (flags & ... | Method that turns bit flags into a human readable list
Args:
transport (dict): transport info, specifically needs a 'flags' key with bit_flags
Returns:
list: a list of human readable flags (e.g. ['syn_ack', 'fin', 'rst', ...] | codesearchnet |
def write(self, data, echo=None):
    """Write data to the channel, optionally echoing it to stdout.

    Args:
        data (bytes): The data to write to the channel.
        echo (bool): Whether to echo the written data to stdout; when
            None, fall back to the session default ``self.echo``.

    Raises:
        EOFError: If the channel was closed before all data was sent.
    """
    should_echo = self.echo if echo is None else echo
    if should_echo:
        sys.stdout.write(data.decode('latin1'))
        sys.stdout.flush()
    self.channel.write(data)
self.channel.write(data) | Write data to channel.
Args:
data(bytes): The data to write to the channel.
echo(bool): Whether to echo the written data to stdout.
Raises:
EOFError: If the channel was closed before all data was sent. | juraj-google-style |
def _refresh(self, http):
if not self.store:
self._do_refresh_request(http)
else:
self.store.acquire_lock()
try:
new_cred = self.store.locked_get()
if (new_cred and not new_cred.invalid and
new_cred.acc... | Refreshes the access_token.
This method first checks by reading the Storage object if available.
If a refresh is still needed, it holds the Storage lock until the
refresh is completed.
Args:
http: an object to be used to make HTTP requests.
Raises:
HttpAccessTokenRefreshError: When the refresh fails. | juraj-google-style |
def _request(self, method, resource_uri, **kwargs):
data = kwargs.get('data')
response = method(self.API_BASE_URL + resource_uri,
json=data, headers=self.headers)
response.raise_for_status()
return response.json() | Perform a method on a resource.
Args:
method: requests.`method`
resource_uri: resource endpoint
Raises:
HTTPError
Returns:
JSON Response | juraj-google-style |
def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader):
class OrderedUniqueLoader(loader):
'\n Subclasses the given pyYAML `loader` class.\n\n Validates all sibling keys to insure no duplicates.\n\n Returns an OrderedDict instead of a Dict.\n '
NO_DUPE_SIBLINGS = ['sta... | Provides yaml.load alternative with preserved dictionary order.
Args:
stream (string): YAML string to load.
loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe
load.
Returns:
OrderedDict: Parsed YAML. | codesearchnet |
def handle_document(self, item_session: ItemSession, filename: str) -> Actions:
self._waiter.reset()
action = self.handle_response(item_session)
if (action == Actions.NORMAL):
self._statistics.increment(item_session.response.body.size())
item_session.set_status(Status.done, filename=filename... | Process a successful document response.
Returns:
A value from :class:`.hook.Actions`. | codesearchnet |
def run_build_model(self, num_runs=5, silent=False, force_rerun=False):
self.mutation_ddG_avg_outfile = 'Average_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
self.mutation_ddG_raw_outfile = 'Raw_{}.fxout'.format(op.splitext(self.repaired_pdb_outfile)[0])
... | Run FoldX BuildModel command with a mutant file input.
Original command::
foldx --command=BuildModel --pdb=4bxi_Repair.pdb --mutant-file=individual_list.txt --numberOfRuns=5
Args:
num_runs (int):
silent (bool): If FoldX output should be silenced from printing to the shell.
force_rerun (bool): If FoldX BuildModel sho... | juraj-google-style |
def _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta, extra_feed_dict):
if x.dtype == dtypes.bfloat16:
x = math_ops.cast(x, dtypes.float32)
if y.dtype == dtypes.bfloat16:
y = math_ops.cast(y, dtypes.float32)
if x_data.dtype == dtypes.bfloat16.as_numpy_dtype:
x_data = x... | Computes the numeric Jacobian for dy/dx.
Computes the numeric Jacobian by slightly perturbing the inputs and
measuring the differences on the output.
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
x_data: a numpy array as the input data for x
y: the tensor "y".
y_shape: the dime... | github-repos |
def destroy_cloudwatch_event(app='', env='dev', region=''):
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('events')
event_rules = get_cloudwatch_event_rule(app_name=app, account=env, region=region)
for rule in event_rules:
cloudwatch_client.remo... | Destroy Cloudwatch event subscription.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
bool: True upon successful completion. | codesearchnet |
def __init__(self, path, script, optimized=True):
self.path = path
self.script = script
if optimized:
library_path = "%s:%s" % (
os.path.join(path, 'build/optimized'),
os.path.join(path, 'build... | Initialization function.
Args:
path (str): absolute path to the ns-3 installation this Runner
should lock on.
script (str): ns-3 script that will be used by this Runner.
optimized (bool): whether this Runner should build ns-3 with the
optimized profile. | juraj-google-style |
def to_string(cls, error_code):
if error_code == cls.COMPARE_ERROR:
return 'Error comparing flash content to programming data.'
elif error_code == cls.PROGRAM_ERASE_ERROR:
return 'Error during program/erase phase.'
elif error_code == cls.VERIFICATION_ERROR:
... | Returns the string message for the given ``error_code``.
Args:
cls (JLinkFlashErrors): the ``JLinkFlashErrors`` class
error_code (int): error code to convert
Returns:
An error string corresponding to the error code.
Raises:
ValueError: if the error code is invalid. | juraj-google-style |
def stdout(self):
if (not self.id):
raise WorkflowError('Workflow is not running. Cannot get stdout.')
if self.batch_values:
raise NotImplementedError('Query Each Workflow Id within the Batch Workflow for stdout.')
wf = self.workflow.get(self.id)
stdout_list = []
for task in wf['tas... | Get stdout from all the tasks of a workflow.
Returns:
(list): tasks with their stdout
Example:
>>> workflow.stdout
[
{
"id": "4488895771403082552",
"taskType": "AOP_Strip_Processor",
"name": "Task1",
"stdout": "............"
}
] | codesearchnet |
def to_dict(self, filter=True):
    """Return a dictionary with the values of the model.

    Leaf values are evaluated to plain python types.

    Args:
        filter (bool): If ``True``, include only values that have been set.

    Returns:
        dict: A dictionary with the values of the model; empty/falsy
        leaves are omitted.
    """
    converted = {}
    for key, value in self:
        leaf = _to_dict(value, filter)
        if leaf:
            converted[key] = leaf
    return converted
return result | Returns a dictionary with the values of the model. Note that the values
of the leafs are evaluated to python types.
Args:
filter (bool): If set to ``True``, show only values that have been set.
Returns:
dict: A dictionary with the values of the model.
Example:
>>> pretty_print(config.to_dict(filter=True))
>>> {
>>>... | codesearchnet |
def set_input(self, p_name, value):
    """Set a Step's input variable to a certain value.

    The value comes either from a workflow input or the output of a
    previous step.

    Args:
        p_name (str): the python-safe name of the Step input.
        value (str): the name of the output variable that provides the
            value for this input.

    Raises:
        ValueError: The name provided is not a valid input name for this
            Step.
    """
    name = self.python_names.get(p_name)
    # Validate the *mapped* name: an unknown p_name maps to None, and a
    # mapped name must still be one of the Step's declared inputs.
    # (The original tested `p_name is None`, which only worked by accident
    # because None is never a member of get_input_names().)
    if name is None or name not in self.get_input_names():
        raise ValueError('Invalid input "{}"'.format(p_name))
    self.step_inputs[name] = value
The value comes either from a workflow input or output of a previous
step.
Args:
name (str): the name of the Step input
value (str): the name of the output variable that provides the
value for this input.
Raises:
ValueError: The name provided is not a valid input name ... | juraj-google-style |
def set_mac_address(self, mac_address=None, default=False, disable=False):
base_command = 'ip virtual-router mac-address'
if ((not default) and (not disable)):
if (mac_address is not None):
if (not re.match('(?:[a-f0-9]{2}:){5}[a-f0-9]{2}', mac_address)):
raise ValueError('ma... | Sets the virtual-router mac address
This method will set the switch virtual-router mac address. If a
virtual-router mac address already exists it will be overwritten.
Args:
mac_address (string): The mac address that will be assigned as
the virtual-router mac address. This should be in the format,
aa:bb:cc:dd:ee:ff.
d... | codesearchnet |
def count_up_to(self, limit):
    """Increments this variable until it reaches `limit`.

    When that Op is run it tries to increment the variable by `1`. If
    incrementing the variable would bring it above `limit` then the Op
    raises `OutOfRangeError`; otherwise it outputs the variable's value
    before the increment.

    Args:
        limit: value at which incrementing the variable raises an error.

    Raises:
        NotImplementedError: always, on this abstract base; concrete
            variable implementations override this method.
    """
    # Abstract method: subclasses provide the real counting op.
    raise NotImplementedError
When that Op is run it tries to increment the variable by `1`. If
incrementing the variable would bring it above `limit` then the Op raises
the exception `OutOfRangeError`.
If no error is raised, the Op outputs the value of the variable before
the increment.
This is... | github-repos |
def db(self, entity, query_filters="size=10"):
if self.entity_api_key == "":
return {'status': 'failure', 'response': 'No API key found in request'}
historic_url = self.base_url + "api/0.1.0/historicData?" + query_filters
historic_headers = {
"apikey": self.enti... | This function allows an entity to access the historic data.
Args:
entity (string): Name of the device to listen to
query_filters (string): Elastic search response format string
example, "pretty=true&size=10" | juraj-google-style |
def WritePathHashHistory(self, client_path, hash_entries):
    """Writes a collection of `Hash` entries observed for a particular path.

    Args:
        client_path: A `ClientPath` instance.
        hash_entries: A dict mapping timestamps to `Hash` instances.
    """
    history = ClientPathHistory()
    for timestamp, hash_entry in iteritems(hash_entries):
        history.AddHashEntry(timestamp, hash_entry)
    self.MultiWritePathHistory({client_path: history})
Args:
client_path: A `ClientPath` instance.
hash_entries: A dictionary with timestamps as keys and `Hash` instances as
values. | juraj-google-style |
def parse_arguments(argv):
parser = argparse.ArgumentParser(
description='Runs Preprocessing on structured data.')
parser.add_argument('--output-dir',
type=str,
required=True,
help='Google Cloud Storage which to place outputs.')
parser.ad... | Parse command line arguments.
Args:
argv: list of command line arguments, includeing programe name.
Returns:
An argparse Namespace object.
Raises:
ValueError: for bad parameters | juraj-google-style |
def IsNotNone(*fields, default=None):
when_clauses = [
expressions.When(
~expressions.Q(**{field: None}),
then=expressions.F(field)
)
for field in reversed(fields)
]
return expressions.Case(
*when_clauses,
default=expressions.Value(defau... | Selects whichever field is not None, in the specified order.
Arguments:
fields:
The fields to attempt to get a value from,
in order.
default:
The value to return in case all values are None.
Returns:
A Case-When expression that tries each field and
returns the specified default value when all of
them are None. | juraj-google-style |
def _describe_bitmask(bits: int, table: Dict[(Any, str)], default: str='0') -> str:
result = []
for (bit, name) in table.items():
if (bit & bits):
result.append(name)
if (not result):
return default
return '|'.join(result) | Returns a bitmask in human readable form.
This is a private function, used internally.
Args:
bits (int): The bitmask to be represented.
table (Dict[Any,str]): A reverse lookup table.
default (Any): A default return value when bits is 0.
Returns: str: A printable version of the bits variable. | codesearchnet |
def to_representation(self, instance):
updated_program = copy.deepcopy(instance)
enterprise_customer_catalog = self.context['enterprise_customer_catalog']
updated_program['enrollment_url'] = enterprise_customer_catalog.get_program_enrollment_url(updated_program['uuid'])
for course in updated_program['co... | Return the updated program data dictionary.
Arguments:
instance (dict): The program data.
Returns:
dict: The updated program data. | codesearchnet |
def plot_power_factor_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output... | Plot the power factor in function of Fermi level. Semi-log plot
Args:
temp: the temperature
xlim: a list of min and max fermi energy by default (0, and band
gap)
tau: A relaxation time in s. By default none and the plot is by
units of relaxation time
Returns:
a matplotlib object | juraj-google-style |
def bitwise_or(x, y):
    """Compute the bit-wise OR of two arrays element-wise.

    Computes the bit-wise OR of the underlying binary representation of the
    integers in the input arrays; equivalent to the C/Python `|` operator.

    Args:
        x: Input integer tensor.
        y: Input integer tensor.

    Returns:
        Result tensor.
    """
    # Eager path: delegate straight to the backend implementation.
    if not any_symbolic_tensors((x, y)):
        return backend.numpy.bitwise_or(x, y)
    return BitwiseOr().symbolic_call(x, y)
Computes the bit-wise OR of the underlying binary representation of the
integers in the input arrays. This ufunc implements the C/Python operator
`|`.
Args:
x: Input integer tensor.
y: Input integer tensor.
Returns:
Result tensor. | github-repos |
def random(cls, components, width=False, colour=None):
try:
list_of_Decors = [Decor.random(c) for c in [i[0] for i in components.unique if i[0]]]
except:
try:
list_of_Decors = [Decor.random(c) for c in components.copy()]
except:
list_of_Decors = [Decor.random(comp... | Generate a random legend for a given list of components.
Args:
components (list or Striplog): A list of components. If you pass
a Striplog, it will use the primary components. If you pass a
component on its own, you will get a random Decor.
width (bool): Also generate widths for the components, based on the
order in w... | codesearchnet |
def getLanguage(self, body, ):
resourcePath = '/text/detect_language'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
postData = body
response = self.apiClient._callAPI(... | Detect the language of a text
Args:
body, str: Your input text (UTF-8) (required)
Returns: LanguageRest | juraj-google-style |
def wait_for_tx(self, tx, max_seconds=120):
tx_hash = None
if isinstance(tx, (str, UInt256)):
tx_hash = str(tx)
elif isinstance(tx, Transaction):
tx_hash = tx.Hash.ToString()
else:
raise AttributeError(("Supplied tx is type '%s', but must be Transaction or UInt256 or str" % type(... | Wait for tx to show up on blockchain
Args:
tx (Transaction or UInt256 or str): Transaction or just the hash
max_seconds (float): maximum seconds to wait for tx to show up. default: 120
Returns:
True: if transaction was found
Raises:
AttributeError: if supplied tx is not Transaction or UInt256 or str
TxNotFoundInBloc... | codesearchnet |
def fetch_support_file(name, timestamp_tuple):
stored_filename = os.path.join(_subpar_package, 'runtime', name)
content = pkgutil.get_data(_subpar_package, 'runtime/' + name)
if content is None:
raise error.Error("Internal error: Can't find runtime support file [%s]" % name)
return stored_resour... | Read a file from the runtime package
Args:
name: filename in runtime package's directory
timestamp_tuple: Stored timestamp, as ZipInfo tuple
Returns:
A StoredResource representing the content of that file | github-repos |
def add_defaults_to_kwargs(defaults, **kwargs):
    """Merge ``kwargs`` over a copy of ``defaults``.

    Args:
        defaults: A mapping of default keys and values; it is not mutated.
        **kwargs: Overrides applied on top of the defaults.

    Returns:
        dict: A new dict containing the defaults updated with the kwargs.
    """
    # PEP 448 dict merge: kwargs entries win over defaults on key clashes.
    return {**defaults, **kwargs}
Args:
defaults: A dictionary of keys and values
**kwargs: The kwargs to update.
Returns:
The updated kwargs. | juraj-google-style |
def get_pending_servermanager():
vname = 'CurrentRebootAttempts'
key = 'SOFTWARE\\Microsoft\\ServerManager'
reg_ret = __utils__['reg.read_value']('HKLM', key, vname)
if reg_ret['success']:
log.debug('Found key: %s', key)
try:
if (int(reg_ret['vdata']) > 0):
re... | Determine whether there are pending Server Manager tasks that require a
reboot.
.. versionadded:: 2016.11.0
Returns:
bool: ``True`` if there are pending Server Manager tasks, otherwise
``False``
CLI Example:
.. code-block:: bash
salt '*' system.get_pending_servermanager | codesearchnet |
def __call__(self, kl_fn):
if not callable(kl_fn):
raise TypeError('kl_fn must be callable, received: %s' % kl_fn)
if self._key in _DIVERGENCES:
raise ValueError('KL(%s || %s) has already been registered to: %s' % (self._key[0].__name__, self._key[1].__name__, _DIVERGENCES[self._key]))
_DIVE... | Perform the KL registration.
Args:
kl_fn: The function to use for the KL divergence.
Returns:
kl_fn
Raises:
TypeError: if kl_fn is not a callable.
ValueError: if a KL divergence function has already been registered for
the given argument classes. | github-repos |
def sine(w, A=1, phi=0, offset=0):
    """Return a driver function that can advance a sequence of sine values.

    .. code-block:: none

        value = A * sin(w*i + phi) + offset

    Args:
        w (float): frequency of the sine driver.
        A (float): amplitude of the sine driver.
        phi (float): phase offset to start the sine driver with.
        offset (float): global offset added to every value.
    """
    from math import sin

    def waveform(step):
        return A * sin(w * step + phi) + offset

    return partial(force, sequence=_advance(waveform))
.. code-block:: none
value = A * sin(w*i + phi) + offset
Args:
w (float) : a frequency for the sine driver
A (float) : an amplitude for the sine driver
phi (float) : a phase offset to start the sine driver with
offset (float) : a global offset to a... | codesearchnet |
def prop(pode, prop):
    """Return the valu of a given property on the node.

    Args:
        pode (tuple): A packed node.
        prop (str): Property to retrieve.

    Notes:
        The prop argument may be the full property name (foo:bar:baz),
        relative property name (:baz), or the unadorned property name (baz).

    Returns:
        The property value, or None if the property is not present.
    """
    form = pode[0][0]
    if prop.startswith(form):
        prop = prop[len(form):]
    # startswith() is safe on an empty string, unlike the previous
    # prop[0] check which raised IndexError when prop == form.
    if prop.startswith(':'):
        prop = prop[1:]
    return pode[1]['props'].get(prop)
Args:
pode (tuple): A packed node.
prop (str): Property to retrieve.
Notes:
The prop argument may be the full property name (foo:bar:baz), relative property name (:baz) , or the unadorned
property name (baz).
Returns: | codesearchnet |
def zip_cluster(data, k, init=None, max_iters=100):
(genes, cells) = data.shape
(init, new_assignments) = kmeans_pp((data + eps), k, centers=init)
centers = np.copy(init)
M = np.zeros(centers.shape)
assignments = new_assignments
for c in range(k):
(centers[(:, c)], M[(:, c)]) = zip_fit_p... | Performs hard EM clustering using the zero-inflated Poisson distribution.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
init (array, optional): Initial centers - genes x k array. Default: None, use kmeans++
max_iters (int, optional): Maximum number of iterations. Default: 100
Returns:
assi... | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.