code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def to_FIB(self, other):
    """Creates a ForwardInfluenceBlanket object representing the
    intersection of this model with the other input model.

    Args:
        other: The GroundedFunctionNetwork object to compare this model to.

    Returns:
        A ForwardInfluenceBlanket object to use for model comparison.

    Raises:
        TypeError: If ``other`` is not a GroundedFunctionNetwork.
    """
    if not isinstance(other, GroundedFunctionNetwork):
        raise TypeError(
            f"Expected GroundedFunctionNetwork, but got {type(other)}"
        )

    def shortname(var):
        # Strip the namespace prefix (up to "::") and the trailing
        # "_<suffix>" segment, leaving the bare variable name.
        return var[var.find("::") + 2 : var.rfind("_")]

    def shortname_vars(graph, shortname):
        # All full node names in `graph` that contain the short name.
        return [v for v in graph.nodes() if shortname in v]

    this_var_nodes = [
        shortname(n)
        for (n, d) in self.nodes(data=True)
        if d["type"] == "variable"
    ]
    other_var_nodes = [
        shortname(n)
        for (n, d) in other.nodes(data=True)
        if d["type"] == "variable"
    ]
    # Variables common to both networks, expanded back to their full
    # node names within this network.
    shared_vars = set(this_var_nodes).intersection(set(other_var_nodes))
    full_shared_vars = {
        full_var
        for shared_var in shared_vars
        for full_var in shortname_vars(self, shared_var)
    }
    return ForwardInfluenceBlanket(self, full_shared_vars)
def compute_distance(a, b):
    """Computes a modified Levenshtein distance between two strings.

    The comparison uses the lowercase form of each string, and the
    substitution penalty is weighted by QWERTY keyboard distance between
    the differing characters.

    Arguments:
        - a (str) String to compare to 'b'
        - b (str) String to compare to 'a'

    Returns:
        - (int) Number representing closeness of 'a' and 'b' (lower is better)
    """
    # An empty string is len(other) edits away from the other string.
    if not a:
        return len(b)
    if not b:
        return len(a)
    # Equal (case-insensitively) strings need no edits at all.
    if a == b or str.lower(a) == str.lower(b):
        return 0
    a, b = str.lower(a), str.lower(b)
    width = len(b) + 1
    # `previous` is the prior row of the dynamic-programming table:
    # distance from the empty prefix of `a` to each prefix of `b`.
    previous = list(range(width))
    current = [-1] * width
    for row, ch_a in enumerate(a):
        current[0] = row + 1
        for col, ch_b in enumerate(b):
            penalty = 0 if ch_a == ch_b else compute_qwerty_distance(ch_a, ch_b)
            current[col + 1] = min(
                current[col] + 1,         # insertion
                previous[col + 1] + 1,    # deletion
                previous[col] + penalty,  # substitution
            )
        previous = current[:]
    return current[width - 1]
def learning_phase():
    """Returns the learning phase flag.

    The learning phase flag is a bool tensor (0 = test, 1 = train)
    to be passed as input to any Keras function
    that uses a different behavior at train time and test time.

    Returns:
        Learning phase (scalar integer tensor or Python integer).
    """
    graph = ops.get_default_graph()
    if graph is getattr(_GRAPH, 'graph', None):
        # The default graph is Keras's own global graph: use the symbolic
        # learning-phase tensor associated with it.
        learning_phase = symbolic_learning_phase()
    else:
        # Any other graph: look up the learning phase registered for the
        # eager/global context. init_scope lifts us out of function graphs.
        with ops.init_scope():
            learning_phase = _GRAPH_LEARNING_PHASES[None]
    # Graphs capturing the global learning phase cannot be saved as-is.
    _mark_func_graph_as_unsaveable(graph, learning_phase)
    return learning_phase
def __init__(self, input_bytes):
    """FuzzingHelper initializer.

    Args:
        input_bytes: Input randomized bytes used to create a FuzzedDataProvider.
    """
    # Wrap the raw fuzz input so typed values can be drawn from it later.
    self.fdp = atheris.FuzzedDataProvider(input_bytes)
def get_apod(cls, date=None, hd=False):
    """Returns Astronomy Picture of the Day.

    Args:
        date: date instance (default = today)
        hd: bool if high resolution should be included

    Returns:
        json
    """
    # Query the 'planetary/apod' endpoint, forwarding the filters as
    # keyword arguments.
    instance = cls('planetary/apod')
    return instance.get_resource(date=date, hd=hd)
def verify_permitted_to_read(gs_path):
    """Check if the user has permissions to read from the given path.

    Args:
        gs_path: the GCS path to check if user is permitted to read.

    Raises:
        Exception: if user has no permissions to read.
    """
    from . import _bucket
    bucket, prefix = _bucket.parse_name(gs_path)
    credentials = None
    if datalab.context.Context.is_signed_in():
        credentials = datalab.context._utils.get_credentials()
    # 'noAcl' projection: we only need to list objects, not inspect ACLs.
    args = {
        'maxResults': Api._MAX_RESULTS,
        'projection': 'noAcl'
    }
    if prefix is not None:
        args['prefix'] = prefix
    url = Api._ENDPOINT + (Api._OBJECT_PATH % (bucket, ''))
    try:
        # A successful list request implies read access to the path.
        datalab.utils.Http.request(url, args=args, credentials=credentials)
    except datalab.utils.RequestException as e:
        if e.status == 401:
            raise Exception('Not permitted to read from specified path. '
                            'Please sign in and make sure you have read access.')
        # Propagate any non-authorization failure unchanged.
        raise e
def unescape(inp, quote='"'):
    """Unescape `quote` in string `inp`.

    Example usage::

        >> unescape('hello \\"')
        'hello "'

    Args:
        inp (str): String in which `quote` will be unescaped.
        quote (char, default "): Specify which character will be unescaped.

    Returns:
        str: Unescaped string.
    """
    # A string shorter than an escape sequence cannot contain one.
    if len(inp) < 2:
        return inp
    chars = []
    escaped = False
    for ch in inp:
        if escaped and ch == quote:
            # Drop the backslash that escaped this quote.
            chars.pop()
        chars.append(ch)
        # A backslash toggles the escape state, so "\\\\" stays literal.
        escaped = (not escaped) if ch == "\\" else False
    return "".join(chars)
def transform_module(self, mod, user_context):
    """Transforms a module.

    Subclasses may override this method. The return value is opaque.
    The method receives the original AST. The result is passed as-is to the
    output of `transform`.

    Args:
        mod: A Python module.
        user_context: An opaque object (may be None) that is forwarded to
            transform_ast, through the ctx.user attribute.

    Returns:
        List[Tuple[Any, Any]]. By default it returns the output of
        transform_ast, evaluated on each supported member, other than
        modules, together with a `transformer.Context` containing
        information about the transformation process.
    """
    transformed = []
    for value in mod.__dict__.values():
        # Never recurse into imported submodules.
        if inspect.ismodule(value):
            continue
        try:
            transformed.append(self.transform(value, user_context))
        except NotImplementedError:
            # Members that the transformer does not support are skipped.
            continue
    return transformed
def __init__(self, engine: trt.ICudaEngine):
    """Implementation of the TensorRTEngine class which handles
    allocations associated with TensorRT engine.

    Example Usage::

        TensorRTEngine(engine)

    Args:
        engine: trt.ICudaEngine object that contains TensorRT engine
    """
    from cuda import cuda
    import tensorrt as trt
    self.engine = engine
    self.context = engine.create_execution_context()
    # Execution contexts are not thread-safe; callers serialize via this lock.
    self.context_lock = threading.RLock()
    self.inputs = []
    self.outputs = []
    self.gpu_allocations = []
    self.cpu_allocations = []
    try:
        # Restore the np.bool alias (removed in NumPy 1.24) for
        # compatibility with code paths that still reference it.
        _ = np.bool
    except AttributeError:
        np.bool = np.bool_
    for i in range(self.engine.num_bindings):
        name = self.engine.get_binding_name(i)
        dtype = self.engine.get_binding_dtype(i)
        shape = self.engine.get_binding_shape(i)
        # Total byte size required for this binding's device buffer.
        size = trt.volume(shape) * dtype.itemsize
        allocation = _assign_or_fail(cuda.cuMemAlloc(size))
        binding = {'index': i, 'name': name, 'dtype': np.dtype(trt.nptype(dtype)), 'shape': list(shape), 'allocation': allocation, 'size': size}
        self.gpu_allocations.append(allocation)
        if self.engine.binding_is_input(i):
            self.inputs.append(binding)
        else:
            self.outputs.append(binding)
    # A usable engine must have a context and at least one input, output,
    # and device allocation.
    assert self.context
    assert len(self.inputs) > 0
    assert len(self.outputs) > 0
    assert len(self.gpu_allocations) > 0
    for output in self.outputs:
        # Host-side staging buffers used to copy results off the device.
        self.cpu_allocations.append(np.zeros(output['shape'], output['dtype']))
    self.stream = _assign_or_fail(cuda.cuStreamCreate(0))
def ProtoEq(a, b):
    """Compares two proto2 objects for equality.

    Recurses into nested messages. Uses list (not set) semantics for comparing
    repeated fields, ie duplicates and order matter.

    Args:
        a: A proto2 message or a primitive.
        b: A proto2 message or a primitive.

    Returns:
        `True` if the messages are equal.
    """
    def Format(pb):
        # Canonicalize the value into a dict keyed by field number / map key /
        # list index, or return it unchanged if it is a primitive.
        if isinstance(pb, message.Message):
            return dict(((desc.number, value) for desc, value in pb.ListFields()))
        elif _IsMap(pb):
            return dict(pb.items())
        elif _IsRepeatedContainer(pb):
            # enumerate() preserves order, so repeated fields compare
            # element-by-element, in order.
            return dict(enumerate(list(pb)))
        else:
            return pb
    a, b = (Format(a), Format(b))
    if not isinstance(a, dict) or not isinstance(b, dict):
        # At least one side is a primitive: fall back to plain equality.
        return a == b
    # Compare every tag present on either side; a tag missing from one
    # side means the messages differ.
    for tag in sorted(set(a.keys()) | set(b.keys())):
        if tag not in a or tag not in b:
            return False
        elif not ProtoEq(a[tag], b[tag]):
            return False
    return True
def mt_report(context, case_id, test, outpath=None):
    """Export all mitochondrial variants for each sample of a case
    and write them to an excel file.

    Args:
        context: click context whose ``obj`` carries the MongoAdapter.
        case_id(str): id of the case to export.
        test(bool): True if the function is called for testing purposes;
            files are then only counted, not written.
        outpath(str): path to output folder (defaults to the current
            working directory).

    Returns:
        written_files(int): number of written or simulated files
    """
    LOG.info('exporting mitochondrial variants for case "{}"'.format(case_id))
    adapter = context.obj['adapter']
    # Restrict the export to variants on the mitochondrial chromosome.
    query = {'chrom':'MT'}
    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        LOG.warning('Could not find a scout case with id "{}". No report was created.'.format(case_id))
        context.abort()
    samples = case_obj.get('individuals')
    mt_variants = list(adapter.variants(case_id=case_id, query=query, nr_of_variants= -1, sort_key='position'))
    if not mt_variants:
        LOG.warning('There are no MT variants associated to case {} in database!'.format(case_id))
        context.abort()
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    if not outpath:
        outpath = str(os.getcwd())
    written_files = 0
    # One workbook per sample, named <case>.<sample>.<date>.xlsx.
    for sample in samples:
        sample_id = sample['individual_id']
        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)
        document_name = '.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx'
        workbook = Workbook(os.path.join(outpath,document_name))
        Report_Sheet = workbook.add_worksheet()
        if test and sample_lines and workbook:
            # Test mode: count the file as writable without writing it.
            written_files +=1
            continue
    # Write the header row, then one row per exported variant line.
        row = 0
        for col,field in enumerate(MT_EXPORT_HEADER):
            Report_Sheet.write(row,col,field)
        for row, line in enumerate(sample_lines,1):
            for col, field in enumerate(line):
                Report_Sheet.write(row,col,field)
        workbook.close()
        if os.path.exists(os.path.join(outpath,document_name)):
            written_files += 1
    if test:
        LOG.info("Number of excel files that can be written to folder {0}: {1}".format(outpath, written_files))
    else:
        LOG.info("Number of excel files written to folder {0}: {1}".format(outpath, written_files))
    return written_files
def is_symbolic_tensor(tensor):
    """Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.

    A Variable can be seen as either: it is considered symbolic
    when we are in a graph scope, and eager when we are in an eager scope.

    Args:
        tensor: A tensor instance to test.

    Returns:
        True for symbolic tensors, False for eager tensors.
    """
    if isinstance(tensor, tensor_lib.Tensor):
        # Graph tensors carry a `graph` attribute; eager tensors do not.
        return hasattr(tensor, 'graph')
    elif is_extension_type(tensor):
        # A composite tensor is symbolic if any of its components is.
        component_tensors = nest.flatten(tensor, expand_composites=True)
        return any((hasattr(t, 'graph') for t in component_tensors))
    elif isinstance(tensor, variables.Variable):
        # A variable is symbolic when it came from a Keras layer
        # (_keras_history set) or when we are executing in graph mode.
        return getattr(tensor, '_keras_history', False) or not context.executing_eagerly()
    elif isinstance(tensor, tuple(_user_convertible_tensor_types)):
        # User-registered convertible types: convert, then re-test.
        tensor = ops.convert_to_tensor_or_composite(tensor)
        return is_symbolic_tensor(tensor)
    else:
        return False
def Print(self, output_writer):
    """Prints a human readable version of the filter.

    Args:
        output_writer (CLIOutputWriter): output writer.
    """
    if not self._filters:
        # Nothing to report when no filters are configured.
        return
    output_writer.Write('Filters:\n')
    for entry_filter in self._filters:
        entry_filter.Print(output_writer)
def has_axis(self, axis):
    """Check if the event has a valid value for the given axis.

    If this method returns True for an axis and :meth:`get_axis_value`
    returns a value of 0, the event is a scroll stop event.

    For pointer events that are not of type
    :attr:`~libinput.constant.EventType.POINTER_AXIS`, this method raises
    :exc:`AttributeError`.

    Args:
        axis (~libinput.constant.PointerAxis): The axis to check.

    Returns:
        bool: True if this event contains a value for this axis.

    Raises:
        AttributeError
    """
    if (self.type != EventType.POINTER_AXIS):
        raise AttributeError(_wrong_meth.format(self.type))
    # Delegate to the underlying libinput C API call.
    return self._libinput.libinput_event_pointer_has_axis(self._handle, axis)
def stderr(self):
    """Get stderr from all the tasks of a workflow.

    Returns:
        (list): tasks with their stderr

    Example:
        >>> workflow.stderr
        [
            {
                "id": "4488895771403082552",
                "taskType": "AOP_Strip_Processor",
                "name": "Task1",
                "stderr": "............"
            }
        ]

    Raises:
        WorkflowError: if the workflow has not been started.
        NotImplementedError: for batch workflows; query each workflow id
            within the batch individually instead.
    """
    if (not self.id):
        raise WorkflowError('Workflow is not running. Cannot get stderr.')
    if self.batch_values:
        raise NotImplementedError('Query Each Workflow Id within the Batch Workflow for stderr.')
    wf = self.workflow.get(self.id)
    # Collect stderr per task, preserving the workflow's task order.
    stderr_list = []
    for task in wf['tasks']:
        stderr_list.append({'id': task['id'], 'taskType': task['taskType'], 'name': task['name'], 'stderr': self.workflow.get_stderr(self.id, task['id'])})
    return stderr_list
def prepare(self):
    """Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent.

    Returns:
        aioxmpp.stanza.Message: the message prepared to be sent
    """
    msg = aioxmpp.stanza.Message(to=self.to, from_=self.sender, type_=aioxmpp.MessageType.CHAT)
    msg.body[None] = self.body
    if len(self.metadata):
        # Metadata travels as an XEP-0004 data form attached to the stanza.
        data = forms_xso.Data(type_=forms_xso.DataType.FORM)
        for (name, value) in self.metadata.items():
            data.fields.append(forms_xso.Field(var=name, type_=forms_xso.FieldType.TEXT_SINGLE, values=[value]))
        if self.thread:
            # The thread id rides along as a reserved "_thread_node" field.
            data.fields.append(forms_xso.Field(var='_thread_node', type_=forms_xso.FieldType.TEXT_SINGLE, values=[self.thread]))
        data.title = SPADE_X_METADATA
        msg.xep0004_data = [data]
    return msg
def Parse(self, conditions, host_data):
    """Runs probes that evaluate whether collected data has an issue.

    Args:
        conditions: The trigger conditions.
        host_data: A map of artifacts and rdf data.

    Returns:
        Anomalies if an issue exists.

    Raises:
        ProcessingError: if a probe's artifact data cannot be parsed.
    """
    processed = []
    # Select the probes whose triggers match the given conditions.
    probes = self.triggers.Calls(conditions)
    for p in probes:
        artifact_data = host_data.get(p.artifact)
        if (not p.result_context):
            # Default to the parser output when no result context is set.
            rdf_data = artifact_data['PARSER']
        else:
            rdf_data = artifact_data.get(str(p.result_context))
        try:
            result = p.Parse(rdf_data)
        except ProcessingError as e:
            # Re-raise with the offending artifact name for context.
            raise ProcessingError(('Bad artifact %s: %s' % (p.artifact, e)))
        if result:
            processed.append(result)
    # The matcher decides whether the probe results constitute an anomaly.
    return self.matcher.Detect(probes, processed)
def entropy(state):
    """Compute the von-Neumann entropy of a quantum state.

    Args:
        state (array_like): a density matrix or state vector.

    Returns:
        float: The von-Neumann entropy S(rho).
    """
    if np.array(state).ndim == 1:
        # A state vector describes a pure state, whose entropy is zero.
        return 0
    # Clip tiny negative eigenvalues produced by floating-point noise.
    eigenvalues = np.maximum(np.linalg.eigvalsh(state), 0.0)
    return shannon_entropy(eigenvalues, base=np.e)
def set_timezone(tz=None, deploy=False):
    """Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.

    CLI Example:

    .. code-block:: bash

        salt '*' panos.set_timezone UTC
        salt '*' panos.set_timezone UTC deploy=True

    Args:
        tz (str): The name of the timezone to set.
        deploy (bool): If true then commit the full candidate configuration, if false only set pending change.

    Raises:
        CommandExecutionError: if no timezone name is supplied.
    """
    if (not tz):
        raise CommandExecutionError('Timezone name option must not be none.')
    ret = {}
    # Build the PAN-OS XML API request that sets the device timezone.
    query = {'type': 'config', 'action': 'set', 'xpath': "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/timezone", 'element': '<timezone>{0}</timezone>'.format(tz)}
    ret.update(__proxy__['panos.call'](query))
    if (deploy is True):
        # Immediately commit the full candidate configuration.
        ret.update(commit())
    return ret
def default_pass_manager(basis_gates, coupling_map, initial_layout, seed_transpiler):
    """The default pass manager that maps to the coupling map.

    Args:
        basis_gates (list[str]): list of basis gate names supported by the target.
        coupling_map (CouplingMap): coupling map to target in mapping.
        initial_layout (Layout or None): initial layout of virtual qubits on physical qubits
        seed_transpiler (int or None): random seed for stochastic passes.

    Returns:
        PassManager: A pass manager to map and optimize.
    """
    pass_manager = PassManager()
    pass_manager.property_set['layout'] = initial_layout
    pass_manager.append(Unroller(basis_gates))
    # Fall back to a trivial layout only when the caller gave none.
    pass_manager.append(TrivialLayout(coupling_map),
                        condition=lambda property_set: not property_set['layout'])
    pass_manager.append(CheckMap(coupling_map))
    # Use a dense layout when the circuit is not yet swap-mapped.
    pass_manager.append(DenseLayout(coupling_map),
                        condition=lambda property_set: not property_set['is_swap_mapped'])
    pass_manager.append(FullAncillaAllocation(coupling_map))
    pass_manager.append(EnlargeWithAncilla())
    pass_manager.append(Unroll3qOrMore())
    # Insert swaps so two-qubit gates respect the coupling map.
    pass_manager.append(LegacySwap(coupling_map, trials=20, seed=seed_transpiler))
    pass_manager.append(Decompose(SwapGate))
    pass_manager.append(CXDirection(coupling_map))
    pass_manager.append(Unroller(['u1', 'u2', 'u3', 'id', 'cx']))
    # Iterate the simplification passes until circuit depth stops shrinking.
    simplification_passes = [Optimize1qGates(), CXCancellation(), RemoveResetInZeroState()]
    pass_manager.append(simplification_passes + [Depth(), FixedPoint('depth')],
                        do_while=lambda property_set: not property_set['depth_fixed_point'])
    return pass_manager
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
        transitions: Tuple of transition quantities with batch dimension.
        rows: Episodes to append to, defaults to all.

    Returns:
        Operation.
    """
    rows = (tf.range(self._capacity) if (rows is None) else rows)
    assert (rows.shape.ndims == 1)
    # Fail fast when writing past the buffer capacity or episode length.
    assert_capacity = tf.assert_less(rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
        assert_max_length = tf.assert_less(tf.gather(self._length, rows), self._max_length, message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
        # Write each transition at the current timestep of its episode row.
        timestep = tf.gather(self._length, rows)
        indices = tf.stack([rows, timestep], 1)
        append_ops = tools.nested.map((lambda var, val: tf.scatter_nd_update(var, indices, val)), self._buffers, transitions, flatten=True)
    with tf.control_dependencies(append_ops):
        # Increment the length of every episode that received a transition.
        episode_mask = tf.reduce_sum(tf.one_hot(rows, self._capacity, dtype=tf.int32), 0)
        return self._length.assign_add(episode_mask)
def get_metric_function(metric, output_shape=None, loss_fn=None):
    """Returns the metric function corresponding to the given metric input.

    Args:
        metric: Metric function name or reference.
        output_shape: The shape of the output that this metric will be calculated
            for.
        loss_fn: The loss function used.

    Returns:
        The metric function.
    """
    if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
        # Anything other than the generic aliases resolves directly.
        return metrics_module.get(metric)
    # The generic 'accuracy'/'crossentropy' aliases are disambiguated by
    # inspecting the output shape and the loss function in use.
    is_sparse_categorical_crossentropy = isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.sparse_categorical_crossentropy)
    is_binary_crossentropy = isinstance(loss_fn, losses.BinaryCrossentropy) or (isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.binary_crossentropy)
    if metric in ['accuracy', 'acc']:
        if output_shape[-1] == 1 or is_binary_crossentropy:
            return metrics_module.binary_accuracy
        elif is_sparse_categorical_crossentropy:
            return metrics_module.sparse_categorical_accuracy
        # Default: one-hot targets with a categorical loss.
        return metrics_module.categorical_accuracy
    else:
        if output_shape[-1] == 1 or is_binary_crossentropy:
            return metrics_module.binary_crossentropy
        elif is_sparse_categorical_crossentropy:
            return metrics_module.sparse_categorical_crossentropy
        return metrics_module.categorical_crossentropy
def render_asset_html(self, path, tag_template):
    """Render HTML tag for a given path.

    Arguments:
        path (string): Relative path from static directory.
        tag_template (string): Template string for HTML tag.

    Returns:
        string: HTML tag with url from given path.
    """
    # Join with posixpath so the URL always uses forward slashes;
    # os.path.join would produce backslashes on Windows.
    import posixpath
    url = posixpath.join(settings.STATIC_URL, path)
    return tag_template.format(url=url)
def reflection_matrix_pow(reflection_matrix: np.ndarray, exponent: float):
    """Raises a matrix with two opposing eigenvalues to a power.

    Args:
        reflection_matrix: The matrix to raise to a power.
        exponent: The power to raise the matrix to.

    Returns:
        The given matrix raised to the given power.
    """
    # Fixed: the original extraction had invalid index syntax `[(:, 0)]`;
    # the intended slices are the first column and the first row.
    # Their dot product equals phase**2 for a reflection with
    # eigenvalues +/- phase.
    squared_phase = np.dot(reflection_matrix[:, 0], reflection_matrix[0, :])
    phase = complex(np.sqrt(squared_phase))
    i = (np.eye(reflection_matrix.shape[0]) * phase)
    # Split into (scaled) projectors onto the +phase and -phase eigenspaces.
    pos_part = ((i + reflection_matrix) * 0.5)
    neg_part = ((i - reflection_matrix) * 0.5)
    # Raise each eigenvalue independently: (+phase)**e and (-phase)**e.
    pos_factor = (phase ** (exponent - 1))
    neg_factor = (pos_factor * (complex(-1) ** exponent))
    pos_part_raised = (pos_factor * pos_part)
    neg_part_raised = (neg_part * neg_factor)
    return (pos_part_raised + neg_part_raised)
def __init__(self, element=None):
    """Constructor.

    Args:
        element: object to attach to this root.
    """
    super(RootTreeMapNode, self).__init__(element)
    # The root node is, by definition, at depth zero.
    self._depth = 0
def merge_single_qubit_gates_into_phased_x_z(circuit: circuits.Circuit, atol: float=1e-08) -> None:
    """Canonicalizes runs of single-qubit rotations in a circuit.

    Specifically, any run of non-parameterized circuits will be replaced by an
    optional PhasedX operation followed by an optional Z operation.

    Args:
        circuit: The circuit to rewrite. This value is mutated in-place.
        atol: Absolute tolerance to angle error. Larger values allow more
            negligible gates to be dropped, smaller values increase accuracy.
    """
    def synth(qubit: ops.Qid, matrix: np.ndarray) -> List[ops.Operation]:
        # Decompose the accumulated 2x2 unitary into PhasedX/Z gates and
        # apply them to the qubit the run acted on.
        out_gates = decompositions.single_qubit_matrix_to_phased_x_z(matrix, atol)
        return [gate(qubit) for gate in out_gates]
    MergeSingleQubitGates(synthesizer=synth).optimize_circuit(circuit)
def _process(op_queue, seen_ops):
    """Processes the next element of the op queue.

    Args:
        op_queue: Queue of Dataset operations to process.
        seen_ops: Already processed set of Operations.

    Returns:
        A 2-tuple containing sets of resource handles. The first tuple entry
        contains read-only handles and the second entry contains read-write
        handles.
    """
    reads = []
    writes = []
    op = op_queue.pop()
    if op in seen_ops:
        # Already visited: nothing new to contribute.
        return (reads, writes)
    seen_ops.add(op)
    reads, writes = acd_utils.get_read_write_resource_inputs(op)
    # Enqueue the producers of variant-dtype inputs (nested datasets) so
    # their resource usage is processed on later calls.
    op_queue.extend((t.op for t in op.inputs if t.dtype == dtypes.variant))
    return (reads, writes)
def __init__(self, port, observer):
    """Instantiates a GUI server.

    Args:
        port: tcp/ssl port for internal web server.
        observer: GuiObserver called on requests.
    """
    self._web_server = gui.websocket_server.WebSocketServer()
    self._port = port
    self._observer = observer
    # Currently connected websocket clients.
    self._clients = set()
def _merge_bee(self, bee):
    """Shifts a random value for a supplied bee with in accordance with
    another random bee's value.

    Args:
        bee (EmployerBee): supplied bee to merge

    Returns:
        tuple: (score of new position, values of new position, fitness
            function return value of new position)
    """
    random_dimension = randint(0, (len(self._value_ranges) - 1))
    # Pick a partner bee that is different from the supplied one.
    second_bee = randint(0, (self._num_employers - 1))
    while (bee.id == self._employers[second_bee].id):
        second_bee = randint(0, (self._num_employers - 1))
    # Work on a copy so the original bee's position is left untouched.
    new_bee = deepcopy(bee)
    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(new_bee.values[random_dimension], self._employers[second_bee].values[random_dimension], self._value_ranges[random_dimension])
    fitness_score = new_bee.get_score(self._fitness_fxn(new_bee.values, **self._args))
    return (fitness_score, new_bee.values, new_bee.error)
def Add(self, entry):
    """Add a MapEntry object to the Map and verify it (overwrites).

    Args:
        entry: A maps.MapEntry instance.

    Returns:
        A boolean indicating the add is successful when True.

    Raises:
        TypeError: The object passed is not the right type.
    """
    if not isinstance(entry, MapEntry):
        raise TypeError('Not instance of MapEntry')
    if not entry.Verify():
        # Invalid entries are rejected (returning False, not raising) so
        # callers can keep processing other entries.
        self.log.info('refusing to add entry, verify failed')
        return False
    if entry.Key() not in self._data:
        # Track key insertion order in the index.
        self._index.append(entry.Key())
    else:
        self.log.warning('duplicate key detected when adding to map: %r, overwritten', entry.Key())
    self._data[entry.Key()] = entry
    return True
def new_from_list(cls, items, **kwargs):
    """Populates the ListView with a string list.

    Args:
        items (list): list of strings to fill the widget with.
        **kwargs: forwarded to the class constructor.

    Returns:
        The newly constructed instance populated with one ListItem per string.
    """
    obj = cls(**kwargs)
    for item in items:
        obj.append(ListItem(item))
    return obj
def entropy(state):
    """Compute the von-Neumann entropy of a quantum state.

    Args:
        state (array_like): a density matrix or state vector.

    Returns:
        float: The von-Neumann entropy S(rho).
    """
    rho = np.array(state)
    if rho.ndim == 1:
        # State vectors are pure states; their entropy is exactly zero.
        return 0
    evals = np.linalg.eigvalsh(state)
    # Clamp negative eigenvalues caused by floating-point noise to zero.
    return shannon_entropy(np.maximum(evals, 0.0), base=np.e)
def train(self, input_data_config, output_data_config, hyperparameters, job_name):
    """Run a training job locally using docker-compose.

    Args:
        input_data_config (dict): The Input Data Configuration, this contains data such as the
            channels to be used for training.
        output_data_config (dict): The Output Data Configuration describing where the
            training artifacts are written.
        hyperparameters (dict): The HyperParameters for the training job.
        job_name (str): Name of the local training job being run.

    Returns (str): Location of the trained model.

    Raises:
        RuntimeError: if the docker-compose training process fails.
    """
    self.container_root = self._create_tmp_folder()
    os.mkdir(os.path.join(self.container_root, 'output'))
    os.mkdir(os.path.join(self.container_root, 'output', 'data'))
    # Shared directory mounted into every container host.
    shared_dir = os.path.join(self.container_root, 'shared')
    os.mkdir(shared_dir)
    data_dir = self._create_tmp_folder()
    volumes = self._prepare_training_volumes(data_dir, input_data_config, output_data_config,
                                             hyperparameters)
    # Rewrite any local source-dir path so it resolves inside the container.
    hyperparameters = self._update_local_src_path(hyperparameters, key=sagemaker.estimator.DIR_PARAM_NAME)
    # Each simulated host gets its own config files and a copy of the data.
    for host in self.hosts:
        _create_config_file_directories(self.container_root, host)
        self.write_config_files(host, hyperparameters, input_data_config)
        shutil.copytree(data_dir, os.path.join(self.container_root, host, 'input', 'data'))
    training_env_vars = {
        REGION_ENV_NAME: self.sagemaker_session.boto_region_name,
        TRAINING_JOB_NAME_ENV_NAME: job_name,
    }
    compose_data = self._generate_compose_file('train', additional_volumes=volumes,
                                               additional_env_vars=training_env_vars)
    compose_command = self._compose()
    if _ecr_login_if_needed(self.sagemaker_session.boto_session, self.image):
        _pull_image(self.image)
    process = subprocess.Popen(compose_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        _stream_output(process)
    except RuntimeError as e:
        # Include the compose command in the error for easier debugging.
        msg = "Failed to run: %s, %s" % (compose_command, str(e))
        raise RuntimeError(msg)
    finally:
        # Retrieve artifacts and clean up temp dirs even when training fails.
        artifacts = self.retrieve_artifacts(compose_data, output_data_config, job_name)
        dirs_to_delete = [data_dir, shared_dir]
        self._cleanup(dirs_to_delete)
    print('===== Job Complete =====')
    return artifacts
def get_lambda_alias_arn(app, account, region):
    """Get lambda alias ARN. Assumes that account name is equal to alias name.

    Args:
        account (str): AWS account name.
        region (str): Region name, e.g. us-east-1
        app (str): Lambda function name

    Returns:
        str: ARN for requested lambda alias

    Raises:
        LambdaAliasDoesNotExist: if no alias named after the account exists.
    """
    session = boto3.Session(profile_name=account, region_name=region)
    lambda_client = session.client('lambda')
    lambda_aliases = lambda_client.list_aliases(FunctionName=app)
    matched_alias = None
    for alias in lambda_aliases['Aliases']:
        # By convention the alias name matches the account name.
        if (alias['Name'] == account):
            lambda_alias_arn = alias['AliasArn']
            LOG.info('Found ARN for alias %s for function %s', account, app)
            matched_alias = lambda_alias_arn
            break
    else:
        # The loop finished without a break: no matching alias was found.
        fatal_message = 'Lambda alias {0} of function {1} not found'.format(account, app)
        LOG.fatal(fatal_message)
        raise LambdaAliasDoesNotExist(fatal_message)
    return matched_alias
def request(self, method: str, path: str, content: Optional[Union[(dict, bytes, str)]]=None, timestamp: Optional[int]=None, external_url: Optional[str]=None, headers: Optional[Dict[(str, str)]]=None, query_params: Optional[Dict[(str, Any)]]=None, api_path: str='/_matrix/client/r0') -> Awaitable[dict]:
    """Make a raw HTTP request.

    Args:
        method: The HTTP method to use.
        path: The API endpoint to call. Does not include the base path (e.g. /_matrix/client/r0).
        content: The content to post as a dict (json) or bytes/str (raw).
        timestamp: The timestamp query param used for timestamp massaging.
        external_url: The external_url field to send in the content
            (only applicable if content is dict).
        headers: The dict of HTTP headers to send.
        query_params: The dict of query parameters to send.
        api_path: The base API path.

    Returns:
        The response as a dict.

    Raises:
        MatrixError: if an unsupported HTTP method is requested.
    """
    content = (content or {})
    headers = (headers or {})
    query_params = (query_params or {})
    query_params['access_token'] = self.token
    if (timestamp is not None):
        if isinstance(timestamp, datetime):
            # Convert datetimes to milliseconds since the epoch, in UTC.
            timestamp = int((timestamp.replace(tzinfo=timezone.utc).timestamp() * 1000))
        query_params['ts'] = timestamp
    if (isinstance(content, dict) and (external_url is not None)):
        content['external_url'] = external_url
    method = method.upper()
    if (method not in ['GET', 'PUT', 'DELETE', 'POST']):
        raise MatrixError(('Unsupported HTTP method: %s' % method))
    if ('Content-Type' not in headers):
        headers['Content-Type'] = 'application/json'
    if (headers.get('Content-Type', None) == 'application/json'):
        # JSON bodies are serialized here; raw bodies pass through untouched.
        content = json.dumps(content)
    if (self.identity and (not self.is_real_user)):
        # Appservice bots impersonate users via the user_id query param.
        query_params['user_id'] = self.identity
    self._log_request(method, path, content, query_params)
    endpoint = ((self.base_url + api_path) + path)
    return self._send(method, endpoint, content, query_params, (headers or {}))
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
    """Write the AttributeReference structure encoding to the data stream.

    Args:
        output_buffer (stream): A data stream in which to encode
            Attributes structure data, supporting a write method.
        kmip_version (enum): A KMIPVersion enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 2.0.

    Raises:
        InvalidField: Raised if the vendor identification or attribute name
            fields are not defined.
        VersionNotSupported: Raised when a KMIP version is provided that
            does not support the AttributeReference structure.
    """
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        raise exceptions.VersionNotSupported('KMIP {} does not support the AttributeReference object.'.format(kmip_version.value))
    # Encode the fields into a local buffer first so the total payload
    # length is known before the TTLV header is written.
    local_buffer = BytearrayStream()
    if self._vendor_identification:
        self._vendor_identification.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField('The AttributeReference is missing the vendor identification field.')
    if self._attribute_name:
        self._attribute_name.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField('The AttributeReference is missing the attribute name field.')
    self.length = local_buffer.length()
    # The parent class writes the header, then we append the payload.
    super(AttributeReference, self).write(output_buffer, kmip_version=kmip_version)
    output_buffer.write(local_buffer.buffer)
def validate_policy(topic, signer, routing_policy, nitpicky=False):
    """Checks that the sender is allowed to emit messages for the given topic.

    Args:
        topic (str): The message topic the ``signer`` used when sending the message.
        signer (str): The Common Name of the certificate used to sign the message.
        routing_policy (dict): Maps topics to the list of signers allowed on them.
        nitpicky (bool): When True, topics absent from the policy are rejected
            instead of being treated as authorized.

    Returns:
        bool: True if the policy defined in the settings allows the signer to send
            messages on ``topic``.
    """
    if topic not in routing_policy:
        # No policy for this topic: reject only in nitpicky mode.
        if nitpicky:
            _log.error("Authorization/routing_policy underspecified.")
            return False
        _log.warning('No routing policy defined for "{t}" but routing_nitpicky is '
                     'False so the message is being treated as authorized.'.format(t=topic))
        return True
    if signer in routing_policy[topic]:
        return True
    _log.error("Authorization/routing_policy error. "
               "Topic %r. Signer %r." % (topic, signer))
    return False
def ed25519_private_key_from_string(string):
    """Create an ed25519 private key from ``string``, which is a seed.

    Args:
        string (str): the string to use as a seed.

    Returns:
        Ed25519PrivateKey: the private key

    Raises:
        ScriptWorkerEd25519Error: if the seed is not valid base64 or does
            not decode to a valid ed25519 private key.
    """
    try:
        return Ed25519PrivateKey.from_private_bytes(
            base64.b64decode(string)
        )
    except (UnsupportedAlgorithm, Base64Error) as exc:
        # Normalize both decode and key-construction failures into the
        # scriptworker-specific exception type.
        raise ScriptWorkerEd25519Error("Can't create Ed25519PrivateKey: {}!".format(str(exc)))
def get_existing_path(path, topmost_path=None):
    """Get the longest parent path in `path` that exists.

    If `path` exists, it is returned.

    Args:
        path (str): Path to test
        topmost_path (str): Do not test this path or above

    Returns:
        str: Existing path, or None if no path was found.
    """
    limit = os.path.normpath(topmost_path) if topmost_path else None
    previous = None
    current = path
    # os.path.dirname() is a fixed point at the filesystem root, so the
    # loop terminates once the path stops changing.
    while current != previous:
        if os.path.exists(current):
            return current
        previous, current = current, os.path.dirname(current)
        if limit is not None and os.path.normpath(current) == limit:
            # Reached the caller-imposed ceiling without finding anything.
            return None
    return None
def _ParseSystemTime(self, byte_stream):
    """Parse a Windows SYSTEMTIME value from ``byte_stream``.

    Returns:
        dfdatetime.Systemtime: the parsed value, or None when every field is
            zero (``_EMPTY_SYSTEM_TIME_TUPLE``), meaning no value is set.

    Raises:
        errors.ParseError: if the structure cannot be read from the stream or
            holds an invalid date/time combination.
    """
    systemtime_map = self._GetDataTypeMap('systemtime')
    try:
        systemtime = self._ReadStructureFromByteStream(byte_stream, 0, systemtime_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError('Unable to parse SYSTEMTIME value with error: {0!s}'.format(exception))
    system_time_tuple = (systemtime.year, systemtime.month, systemtime.weekday, systemtime.day_of_month, systemtime.hours, systemtime.minutes, systemtime.seconds, systemtime.milliseconds)
    if (system_time_tuple == self._EMPTY_SYSTEM_TIME_TUPLE):
        return None
    try:
        return dfdatetime_systemtime.Systemtime(system_time_tuple=system_time_tuple)
    except ValueError:
        raise errors.ParseError('Invalid SYSTEMTIME value: {0!s}'.format(system_time_tuple)) | Parses a SYSTEMTIME date and time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
Returns:
dfdatetime.Systemtime: SYSTEMTIME date and time value or None if no
value is set.
Raises:
ParseError: if the SYSTEMTIME could not be parsed. | codesearchnet |
def enable_logging(log_level):
    """Configure the root logger: always log DEBUG to the logfile stream and,
    when ``log_level`` is truthy, also echo colorized records to a console
    handler at that level.

    Args:
        log_level: logging level for the console handler; falsy disables it.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    logfile_handler = logging.StreamHandler(_LOGFILE_STREAM)
    logfile_handler.setLevel(logging.DEBUG)
    logfile_handler.setFormatter(logging.Formatter(
        '%(levelname)s [%(asctime)s][%(name)s] %(message)s'))
    root_logger.addHandler(logfile_handler)
    # Only install our SIGTERM handler if nobody else has customized it.
    if signal.getsignal(signal.SIGTERM) == signal.SIG_DFL:
        signal.signal(signal.SIGTERM, _logfile_sigterm_handler)
    if log_level:
        handler = logging.StreamHandler()
        handler.setFormatter(_LogColorFormatter())
        root_logger.setLevel(log_level)
        root_logger.addHandler(handler) | Configure the root logger and a logfile handler.
Args:
log_level: The logging level to set the logger handler. | juraj-google-style |
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[(str, TensorFluent)]:
    """Zip the canonical next-state fluent names with ``next_state_fluents``
    (ordering must match ``rddl.domain.next_state_fluent_ordering``)."""
    return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents)) | Returns a partial scope with current next state-fluents.
Args:
next_state_fluents (Sequence[tf.Tensor]): The next state fluents.
Returns:
A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. | codesearchnet |
def stop(self, **kwargs):
    """Stop this container via the Docker API; kwargs (e.g. ``timeout``) pass through."""
    return self.client.api.stop(self.id, **kwargs) | Stops a container. Similar to the ``docker stop`` command.
Args:
timeout (int): Timeout in seconds to wait for the container to
stop before sending a ``SIGKILL``. Default: 10
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | codesearchnet |
def crc_update(crc, data):
    """Fold ``data`` into a running CRC-32C checksum.

    Args:
        crc: current 32-bit checksum as an integer.
        data: byte array, string, or iterable over byte values.

    Returns:
        Updated 32-bit CRC-32C value as an integer.
    """
    # Normalize input to a byte-wise array; reuse it when it already is one.
    if type(data) != array.array or data.itemsize != 1:
        buf = array.array("B", data)
    else:
        buf = data
    # Standard CRC processing runs on the bit-inverted register.
    crc = crc ^ _MASK
    for b in buf:
        table_index = (crc ^ b) & 0xff
        crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & _MASK
    return crc ^ _MASK | Update CRC-32C checksum with data.
Args:
crc: 32-bit checksum to update as long.
data: byte array, string or iterable over bytes.
Returns:
32-bit updated CRC-32C as long. | juraj-google-style |
def get(self, key, default=None):
    """Fetch one key via the memcached ``get`` command; ``default`` if missing."""
    return self._fetch_cmd(b'get', [key], False).get(key, default) | The memcached "get" command, but only for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
Returns:
The value for the key, or default if the key wasn't found. | juraj-google-style |
def FindUnspentCoins(self, from_addr=None, use_standard=False, watch_only_val=0):
    """Collect coins that are confirmed and neither spent, locked nor frozen,
    matching ``watch_only_val`` on the WatchOnly flag.

    Args:
        from_addr (UInt160): only include coins paying this script hash.
        use_standard (bool): only include coins held by standard contracts.
        watch_only_val (int): 0 or 64; required value of the WatchOnly flag.

    Returns:
        list: matching coins, minus any inputs listed in ``self._vin_exclude``.
    """
    ret = []
    for coin in self.GetCoins():
        if coin.State & CoinState.Confirmed > 0 and \
                coin.State & CoinState.Spent == 0 and \
                coin.State & CoinState.Locked == 0 and \
                coin.State & CoinState.Frozen == 0 and \
                coin.State & CoinState.WatchOnly == watch_only_val:
            # Skip coins whose reference matches an explicitly excluded input.
            do_exclude = False
            if self._vin_exclude:
                for to_exclude in self._vin_exclude:
                    if coin.Reference.PrevIndex == to_exclude.PrevIndex and \
                            coin.Reference.PrevHash == to_exclude.PrevHash:
                        do_exclude = True
                if do_exclude:
                    continue
            if from_addr is not None:
                if coin.Output.ScriptHash == from_addr:
                    ret.append(coin)
            elif use_standard:
                contract = self._contracts[coin.Output.ScriptHash.ToBytes()]
                if contract.IsStandard:
                    ret.append(coin)
            else:
                ret.append(coin)
    return ret | Finds unspent coin objects in the wallet.
Args:
from_addr (UInt160): a bytearray (len 20) representing an address.
use_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ).
watch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses.
Returns:
list: a list of ``neo.Wallet.Coins`` in the wallet that are not spent. | juraj-google-style |
def get_blocks(self, block_ids):
    """Return block wrappers for ``block_ids``, silently skipping unknown ids."""
    return list(filter((lambda b: (b is not None)), map(self._get_block_by_id_or_none, block_ids))) | Returns all blocks with the given set of block_ids.
If a block id in the provided iterable does not exist in the block
store, it is ignored.
Args:
block_ids (:iterable:str): an iterable of block ids
Returns
list of block wrappers found for the given block ids | codesearchnet |
def cancelHistoricalData(self, bars: BarDataList):
    """Cancel the keepUpToDate subscription behind ``bars`` and end its updates."""
    self.client.cancelHistoricalData(bars.reqId)
    self.wrapper.endSubscription(bars) | Cancel the update subscription for the historical bars.
Args:
bars: The bar list that was obtained from ``reqHistoricalData``
with a keepUpToDate subscription. | codesearchnet |
def merge( self, other_cluster ):
    """Return a new Cluster whose sites are the union of both clusters' sites;
    its neighbours are the combined neighbours minus the merged sites."""
    new_cluster = Cluster( self.sites | other_cluster.sites )
    new_cluster.neighbours = ( self.neighbours | other_cluster.neighbours ).difference( new_cluster.sites )
    return new_cluster | Combine two clusters into a single cluster.
Args:
other_cluster (Cluster): The second cluster to combine.
Returns:
(Cluster): The combination of both clusters. | juraj-google-style |
def sam_verifier(entries, line=None):
    """Validate SAM entries field-by-field against a regex, raising a
    FormatError whose message names the first offending field.

    Args:
        entries (list): SamEntry instances to validate.
        line (int): line number of the first entry, used in error messages.

    Raises:
        FormatError: when an entry does not match the expected SAM format.
    """
    # One tab-separated pattern per SAM column (QNAME..QUAL); the long numeric
    # alternations bound POS/PNEXT/TLEN to the 32-bit ranges of the SAM spec.
    # NOTE(review): several `\*|...` alternations are not parenthesized, so
    # the `^`/`\t` anchors may bind looser than intended - confirm against the
    # SAM specification before relying on strictness here.
    regex = r'^[!-?A-~]{1,255}\t' \
        + r'([0-9]{1,4}|[0-5][0-9]{4}|' \
        + r'[0-9]{1,4}|[1-5][0-9]{4}|' \
        + r'6[0-4][0-9]{3}|65[0-4][0-9]{2}|' \
        + r'655[0-2][0-9]|6553[0-7])\t' \
        + r'\*|[!-()+-<>-~][!-~]*\t' \
        + r'([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \
        + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \
        + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \
        + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \
        + r'6([0-3][0-9]|4[0-7])))))))))\t' \
        + r'([0-9]{1,2}|1[0-9]{2}|' \
        + r'2[0-4][0-9]|25[0-5])\t' \
        + r'\*|([0-9]+[MIDNSHPX=])+\t' \
        + r'\*|=|[!-()+-<>-~][!-~]*\t' \
        + r'([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \
        + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \
        + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \
        + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \
        + r'6([0-3][0-9]|4[0-7])))))))))\t' \
        + r'-?([0-9]{1,9}|1[0-9]{9}|2(0[0-9]{8}|' \
        + r'1([0-3][0-9]{7}|4([0-6][0-9]{6}|' \
        + r'7([0-3][0-9]{5}|4([0-7][0-9]{4}|' \
        + r'8([0-2][0-9]{3}|3([0-5][0-9]{2}|' \
        + r'6([0-3][0-9]|4[0-7])))))))))\t' \
        + r'\*|[A-Za-z=.]+\t' \
        + r'[!-~]+{0}$'.format(os.linesep)
    delimiter = r'\t'
    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            # error.part identifies which tab-separated field failed; turn it
            # into a human-readable message naming the field.
            if line:
                intro = 'Line {0}'.format(str(line))
            elif error.part == 0:
                intro = 'An entry with reference {0}'.format(entry.rname)
            else:
                intro = 'An entry with query {0}'.format(entry.qname)
            if error.part == 0:
                if len(entry.qname) == 0:
                    msg = '{0} has no query name'.format(intro)
                elif len(entry.qname) > 255:
                    msg = '{0} query name must be less than 255 ' \
                          'characters'.format(intro)
                else:
                    msg = '{0} query name contains characters not in ' \
                          '[!-?A-~]'.format(intro)
            elif error.part == 1:
                msg = '{0} flag not in range [0-(2^31-1)]'.format(intro)
            elif error.part == 2:
                if len(entry.rname) == 0:
                    msg = '{0} has no reference name'.format(intro)
                else:
                    msg = '{0} reference name has characters not in ' \
                          '[!-()+-<>-~][!-~]'.format(intro)
            elif error.part == 3:
                msg = '{0} leftmost position not in range ' \
                      '[0-(2^31-1)]'.format(intro)
            elif error.part == 4:
                msg = '{0} mapping quality not in range ' \
                      '[0-(2^8-1)]'.format(intro)
            elif error.part == 5:
                msg = '{0} CIGAR string has characters not in ' \
                      '[0-9MIDNSHPX=]'.format(intro)
            elif error.part == 6:
                msg = '{0} mate read name has characters not in ' \
                      '[!-()+-<>-~][!-~]'.format(intro)
            elif error.part == 7:
                msg = '{0} mate read position not in range ' \
                      '[0-(2^31-1)]'.format(intro)
            elif error.part == 8:
                msg = '{0} template length not in range ' \
                      '[(-2^31+1)-(2^31-1)]'.format(intro)
            elif error.part == 9:
                msg = '{0} sequence has characters not in ' \
                      '[A-Za-z=.]'.format(intro)
            elif error.part == 10:
                msg = '{0} quality scores has characters not in ' \
                      '[!-~]'.format(intro)
            else:
                msg = '{0}: Unknown Error: Likely a Bug'.format(intro)
            raise FormatError(message=msg)
        if line:
            line += 1 | Raises error if invalid SAM format detected
Args:
entries (list): A list of SamEntry instances
line (int): Line number of first entry
Raises:
FormatError: Error when SAM format incorrect with descriptive message | juraj-google-style |
def list(self):
    """Fetch all reports for the parent app, keeping only Report-typed entries."""
    raw_reports = self._swimlane.request('get', 'reports?appId={}'.format(self._app.id)).json()
    return [Report(self._app, raw_report) for raw_report in raw_reports if (raw_report['$type'] == Report._type)] | Retrieve all reports for parent app
return [Report(self._app, raw_report) for raw_report in raw_reports if (raw_report['$type'] == Report._type)] | Retrieve all reports for parent app
Returns:
:class:`list` of :class:`~swimlane.core.resources.report.Report`: List of all returned reports | codesearchnet |
def _inherit_outputs(self, pipeline_name, already_defined, resolve_outputs=False):
    """Bind this pipeline's output slots to the caller's existing slot keys.

    Args:
        pipeline_name: Pipeline class name, used only in error messages.
        already_defined: maps output name to a db.Key (or its string form) of
            an existing _SlotRecord to inherit.
        resolve_outputs: when True, also fetch every slot's record from the
            datastore so its value is immediately available.

    Raises:
        UnexpectedPipelineError: if a strict pipeline inherits an undeclared
            output, or a resolved slot record is missing from the datastore.
    """
    # NOTE: iteritems/itervalues make this Python 2-only code.
    for (name, slot_key) in already_defined.iteritems():
        if (not isinstance(slot_key, db.Key)):
            slot_key = db.Key(slot_key)
        slot = self._output_dict.get(name)
        if (slot is None):
            if self._strict:
                raise UnexpectedPipelineError(('Inherited output named "%s" must be filled but not declared for pipeline class "%s"' % (name, pipeline_name)))
            else:
                self._output_dict[name] = Slot(name=name, slot_key=slot_key)
        else:
            slot.key = slot_key
            slot._exists = True
    if resolve_outputs:
        slot_key_dict = dict(((s.key, s) for s in self._output_dict.itervalues()))
        all_slots = db.get(slot_key_dict.keys())
        for (slot, slot_record) in zip(slot_key_dict.itervalues(), all_slots):
            if (slot_record is None):
                raise UnexpectedPipelineError(('Inherited output named "%s" for pipeline class "%s" is missing its Slot in the datastore: "%s"' % (slot.name, pipeline_name, slot.key)))
            slot = slot_key_dict[slot_record.key()]
            slot._set_value(slot_record) | Inherits outputs from a calling Pipeline.
Args:
pipeline_name: The Pipeline class name (used for debugging).
already_defined: Maps output name to stringified db.Key (of _SlotRecords)
of any exiting output slots to be inherited by this future.
resolve_outputs: When True, this method will dereference all output slots
before returning back to the caller, making those output slots' values
available.
Raises:
UnexpectedPipelineError when resolve_outputs is True and any of the output
    slots could not be retrieved from the Datastore. | codesearchnet |
def get_results(self, params=None, result_id=None):
    """Query the results table, optionally filtered by id or parameter values.

    Args:
        params: dict mapping parameter names to a value or list of accepted
            values; unspecified parameters are unconstrained. Keys must be a
            subset of the database's known parameters (plus 'RngRun').
        result_id: if given, return only results with this meta id.

    Returns:
        list of result dicts matching the query.

    Raises:
        ValueError: if ``params`` contains keys unknown to the database.
    """
    if (result_id is not None):
        return [dict(i) for i in self.db.table('results').all() if (i['meta']['id'] == result_id)]
    if (params is None):
        return [dict(i) for i in self.db.table('results').all()]
    all_params = set((['RngRun'] + self.get_params()))
    param_subset = set(params.keys())
    if (not all_params.issuperset(param_subset)):
        raise ValueError(('%s:\nParameters: %s\nQuery: %s' % ('Specified parameter keys do not match database format', all_params, param_subset)))
    # Normalize every constraint to a list of accepted values.
    query_params = {}
    for key in params:
        if (not isinstance(params[key], list)):
            query_params[key] = [params[key]]
        else:
            query_params[key] = params[key]
    if (not query_params.keys()):
        return [dict(i) for i in self.db.table('results').all()]
    # AND across parameters, OR across each parameter's accepted values.
    query = reduce(and_, [reduce(or_, [(where('params')[key] == v) for v in value]) for (key, value) in query_params.items()])
    return [dict(i) for i in self.db.table('results').search(query)] | Return all the results available from the database that fulfill some
parameter combinations.
If params is None (or not specified), return all results.
If params is specified, it must be a dictionary specifying the result
values we are interested in, with multiple values specified as lists.
For example, if the following params value is used::
params = {
'param1': 'value1',
'param2': ['value2', 'value3']
}
the database will be queried for results having param1 equal to value1,
and param2 equal to value2 or value3.
Not specifying a value for all the available parameters is allowed:
unspecified parameters are assumed to be 'free', and can take any
value.
Returns:
A list of results matching the query. Returned results have the
same structure as results inserted with the insert_result method. | codesearchnet |
def _use_gl(objs):
    """True if any object is a Plot whose output_backend is 'webgl'."""
    from ..models.plots import Plot
    return _any(objs, (lambda obj: (isinstance(obj, Plot) and (obj.output_backend == 'webgl')))) | Whether a collection of Bokeh objects contains a plot requesting WebGL
return _any(objs, (lambda obj: (isinstance(obj, Plot) and (obj.output_backend == 'webgl')))) | Whether a collection of Bokeh objects contains a plot requesting WebGL
Args:
objs (seq[Model or Document]) :
Returns:
bool | codesearchnet |
def import_submodules(package: Union[(str, ModuleType)], base_package_for_relative_import: str=None, recursive: bool=True) -> Dict[(str, ModuleType)]:
    """Import every submodule of ``package`` (recursively when requested).

    Args:
        package: package name or an already-imported module object.
        base_package_for_relative_import: anchor package when ``package`` is a
            relative name.
        recursive: descend into subpackages as well.

    Returns:
        dict mapping full module name to the imported module object.
    """
    if isinstance(package, str):
        package = importlib.import_module(package, base_package_for_relative_import)
    results = {}
    for (loader, name, is_pkg) in pkgutil.walk_packages(package.__path__):
        full_name = ((package.__name__ + '.') + name)
        log.debug('importing: {}', full_name)
        results[full_name] = importlib.import_module(full_name)
        if (recursive and is_pkg):
            results.update(import_submodules(full_name))
    return results | Import all submodules of a module, recursively, including subpackages.
return results | Import all submodules of a module, recursively, including subpackages.
Args:
package: package (name or actual module)
base_package_for_relative_import: path to prepend?
recursive: import submodules too?
Returns:
dict: mapping from full module name to module | codesearchnet |
def __get_form_data(self, soup):
    """Extract default form data from ``soup`` and let the
    ``form_before_autofill`` callback decide whether to autofill it.

    Returns:
        obj: the (possibly autofilled) form data key/value mapping.
    """
    elements = self.__get_valid_form_data_elements(soup)
    form_data = self.__get_default_form_data_input(elements)
    callback = self.options.callbacks.form_before_autofill
    action = callback(self.queue_item, elements, form_data)
    if action == CrawlerActions.DO_AUTOFILL_FORM:
        self.__autofill_form_data(form_data, elements)
    return form_data | Build a form data dict from the given form.
Args:
soup (obj): The BeautifulSoup form.
Returns:
obj: The form data (key/value). | juraj-google-style |
def expand_abbreviations(txt, fields):
    """Expand abbreviated format variables in ``txt`` to full field names.

    A variable is expanded only when it uniquely prefixes exactly one field;
    otherwise it is left unchanged.
    """
    def _expand(matchobj):
        s = matchobj.group("var")
        if s not in fields:
            matches = [x for x in fields if x.startswith(s)]
            if len(matches) == 1:
                s = matches[0]
        return "{%s}" % s
    return re.sub(FORMAT_VAR_REGEX, _expand, txt) | Expand abbreviations in a format string.
If an abbreviation does not match a field, or matches multiple fields, it
is left unchanged.
Example:
>>> fields = ("hey", "there", "dude")
>>> expand_abbreviations("hello {d}", fields)
'hello dude'
Args:
txt (str): Format string.
fields (list of str): Fields to expand to.
Returns:
Expanded string. | juraj-google-style |
def depth_soil_specific_heat(self, value=None):
    """Set IDD field `depth_soil_specific_heat` (J/kg-K); None marks it missing.

    Raises:
        ValueError: if ``value`` cannot be converted to float.
    """
    if (value is not None):
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float for field `depth_soil_specific_heat`'.format(value))
    self._depth_soil_specific_heat = value | Corresponds to IDD Field `depth_soil_specific_heat`
Args:
value (float): value for IDD Field `depth_soil_specific_heat`
Unit: J/kg-K,
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def __init__(self, filePath=None, loadData=False):
    """Create a DataManager; optionally load JSON data from ``filePath`` now.

    Args:
        filePath: relative or absolute path to a JSON data file.
        loadData: when True, populate ``self.data`` from the file immediately.
    """
    self.data = {}
    self.filePath = filePath
    if loadData:
        self.fileLoad(updatePath=False) | Initialize the DataManager object.
Args:
filePath (Optional[str]): Relative or absolute path to a JSON
data file. Defaults to None.
loadData (Optional[bool]): Loads data from the given file path
if True. Defaults to False. | juraj-google-style |
def add_done_callback(self, fn):
    """Register ``fn`` to run (with this future as sole argument) on completion.

    If the future is already cancelled or finished, ``fn`` is invoked
    immediately - outside the condition lock - instead of being queued.
    """
    with self._condition:
        if (self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]):
            self._done_callbacks.append(fn)
            return
    fn(self) | Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added. | codesearchnet |
def foldl(fn, elems, initializer=None, name=None):
    """Left-fold ``elems`` with ``fn``; thin wrapper over ``functional_ops.foldl``."""
    return functional_ops.foldl(fn, elems, initializer=initializer, name=name) | Reduce elems using fn to combine them from left to right.
Args:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`. | github-repos |
def validate_seeded_answers_simple(answers, options, algo):
    """Check that the seeded answers cover every non-'n/a' option.

    Args:
        answers (list): seeded answers, each referencing an option by index.
        options (dict): all options that should appear among the answers.
        algo (str): selection algorithm (unused by this simple validator).

    Returns:
        None when every option is covered; otherwise a dict with a
        'seed_error' message listing the missing option numbers.
    """
    # Count how often each option (keyed by text + optional image URL) is seeded.
    seen_options = {}
    for answer in answers:
        if answer:
            key = options[answer['answer']].get('text')
            if options[answer['answer']].get('image_url'):
                key += options[answer['answer']].get('image_url')
            seen_options.setdefault(key, 0)
            seen_options[key] += 1
    missing_options = []
    index = 1
    for option in options:
        key = ((option.get('text') + option.get('image_url')) if option.get('image_url') else option.get('text'))
        if (option.get('text') != 'n/a'):
            if (seen_options.get(key, 0) == 0):
                missing_options.append((_('Option ') + str(index)))
            index += 1
    if missing_options:
        return {'seed_error': (_('Missing option seed(s): ') + ', '.join(missing_options))}
    return None | This validator checks if the answers includes all possible options
Args:
answers (str): the answers to be checked
options (dict): all options that should exist in the answers
algo (str): selection algorithm
Returns:
None if everything is good. Otherwise, the missing option error message. | codesearchnet |
def export(rv, code=None, headers=None):
    """Build a flask.Response from an action's return value, negotiating the
    exporter from the request's Accept header (JSON by default).

    Args:
        rv: return value of the action (Response objects pass straight through).
        code: status code; defaults to 200 for exported values.
        headers: response headers.
    """
    if isinstance(rv, ResponseBase):
        return make_response(rv, code, headers)
    else:
        if code is None:
            code = 200
        mediatype = request.accept_mimetypes.best_match(
            exporters.keys(), default='application/json')
        return exporters[mediatype](rv, code, headers) | Create a suitable response
Args:
rv: return value of action
code: status code
headers: response headers
Returns:
flask.Response | juraj-google-style |
def aws_client(self, client_id=None):
if client_id is None:
return self._aws_clients
elif self._aws_clients is not None and self._aws_clients.has_key(client_id):
return self._aws_clients[client_id]
else:
return None | Get AWS client if it exists (must have been formerly stored with set_aws_clients)
If client_id is not provided, returns the dictionary of all clients
Args:
client_id: label for the client, e.g. 'ec2'; omit to get a dictionary of all clients
Returns:
aws client if found, or None if not | juraj-google-style |
def get_connection_id_by_endpoint(self, endpoint):
    """Return the connection id whose connection info uri equals ``endpoint``.

    Raises:
        KeyError: (with no message) when no connection matches the endpoint.
    """
    with self._connections_lock:
        for connection_id in self._connections:
            connection_info = self._connections[connection_id]
            if connection_info.uri == endpoint:
                return connection_id
        raise KeyError() | Returns the connection id associated with a publically
reachable endpoint or raises KeyError if the endpoint is not
found.
Args:
endpoint (str): A zmq-style uri which identifies a publically
reachable endpoint. | juraj-google-style |
def count_matching(self, selector, offset=0):
    """Count readings at index >= ``offset`` whose stream matches ``selector``.

    Args:
        selector (DataStreamSelector): must be an output or buffered selector;
            output selectors scan streaming data, buffered ones storage data.
        offset (int): starting index within the chosen data list.

    Returns:
        int: the number of matching readings.

    Raises:
        ArgumentError: if the selector is neither output nor buffered.
    """
    if selector.output:
        data = self.streaming_data
    elif selector.buffered:
        data = self.storage_data
    else:
        raise ArgumentError("You can only pass a buffered selector to count_matching", selector=selector)
    count = 0
    for i in range(offset, len(data)):
        reading = data[i]
        stream = DataStream.FromEncoded(reading.stream)
        if selector.matches(stream):
            count += 1
    return count | Count the number of readings matching selector.
Args:
selector (DataStreamSelector): The selector that we want to
count matching readings for.
offset (int): The starting offset that we should begin counting at.
Returns:
int: The number of matching readings. | juraj-google-style |
def FileEntryExistsByPathSpec(self, path_spec):
    """Return True if ``path_spec`` resolves to a TSK volume partition, or if
    it names the virtual root location when no partition matches."""
    tsk_vs_part, _ = tsk_partition.GetTSKVsPartByPathSpec(
        self._tsk_volume, path_spec)
    if tsk_vs_part is None:
        location = getattr(path_spec, 'location', None)
        return location is not None and location == self.LOCATION_ROOT
    return True | Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists or false otherwise. | juraj-google-style |
def add_file_recursive(self, filename, trim=False):
    """Breadth-first add ``filename`` and its recursive dependencies as graph
    nodes/edges; unparsable .py files are recorded in ``unreadable_files`` and
    unresolvable deps in ``broken_deps``.

    Args:
        filename: the file to start from.
        trim: forwarded to ``follow_file`` to skip builtin/system deps.
    """
    assert not self.final, 'Trying to mutate a final graph.'
    self.add_source_file(filename)
    queue = collections.deque([filename])
    seen = set()
    while queue:
        filename = queue.popleft()
        self.graph.add_node(filename)
        try:
            deps, broken = self.get_file_deps(filename)
        except parsepy.ParseError:
            # Unparsable Python is tracked; non-Python nodes are dropped.
            if filename.endswith('.py'):
                self.unreadable_files.add(filename)
            else:
                self.graph.remove_node(filename)
            continue
        for f in broken:
            self.broken_deps[filename].add(f)
        for f in deps:
            if self.follow_file(f, seen, trim):
                queue.append(f)
            seen.add(f)
            self.graph.add_node(f)
            self.graph.add_edge(filename, f) | Add a file and all its recursive dependencies to the graph.
Args:
filename: The name of the file.
trim: Whether to trim the dependencies of builtin and system files. | juraj-google-style |
def expect_false(condition, msg, extras=None):
    """Soft-assert that ``condition`` is falsy; on failure, log and record the
    error so the test continues and is marked failed afterwards."""
    try:
        asserts.assert_false(condition, msg, extras)
    except signals.TestSignal as e:
        logging.exception('Expected a `False` value, got `True`.')
        recorder.add_error(e) | Expects an expression evaluates to False.
If the expectation is not met, the test is marked as fail after its
execution finishes.
Args:
    condition: The expression that is evaluated.
msg: A string explaining the details in case of failure.
extras: An optional field for extra information to be included in test
result. | juraj-google-style |
def info(self, **kwargs):
    """GET the season 'info' endpoint, copy the response fields onto self,
    and return the parsed JSON dict."""
    path = self._get_series_id_season_number_path('info')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response | Get the primary information about a TV season by its season number.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
A dict respresentation of the JSON returned from the API. | juraj-google-style |
def expand_repertoire(self, direction, repertoire, new_purview=None):
    """Expand ``repertoire`` onto ``new_purview`` (default: all node indices)
    by multiplying in the unconstrained repertoire of the extra nodes, then
    normalizing. Returns None when ``repertoire`` is None.

    Raises:
        ValueError: if the new purview does not contain the original one.
    """
    if (repertoire is None):
        return None
    purview = distribution.purview(repertoire)
    if (new_purview is None):
        new_purview = self.node_indices
    if (not set(purview).issubset(new_purview)):
        raise ValueError('Expanded purview must contain original purview.')
    non_purview_indices = tuple((set(new_purview) - set(purview)))
    uc = self.unconstrained_repertoire(direction, non_purview_indices)
    expanded_repertoire = (repertoire * uc)
    return distribution.normalize(expanded_repertoire) | Distribute an effect repertoire over a larger purview.
Args:
direction (Direction): |CAUSE| or |EFFECT|.
repertoire (np.ndarray): The repertoire to expand.
Keyword Args:
new_purview (tuple[int]): The new purview to expand the repertoire
over. If ``None`` (the default), the new purview is the entire
network.
Returns:
np.ndarray: A distribution over the new purview, where probability
is spread out over the new nodes.
Raises:
ValueError: If the expanded purview doesn't contain the original
purview. | codesearchnet |
def broadcast_dynamic_shape(shape_x: DynamicRaggedShape, shape_y: DynamicRaggedShape) -> DynamicRaggedShape:
    """Type-check both arguments and return only the broadcast shape from
    ``broadcast_dynamic_shape_extended`` (dropping the broadcaster outputs).

    Raises:
        TypeError: if either argument is not a DynamicRaggedShape.
    """
    if not isinstance(shape_x, DynamicRaggedShape):
        raise TypeError('shape_x must be a DynamicRaggedShape')
    if not isinstance(shape_y, DynamicRaggedShape):
        raise TypeError('shape_y must be a DynamicRaggedShape')
    return broadcast_dynamic_shape_extended(shape_x, shape_y)[0] | Returns the shape formed by broadcasting two shapes to be compatible.
1. If shape_x and shape_y both have row_partitions, then fail if their dtypes
don't match.
2. If neither has row_partitions and they have different dtypes,
go with int64.
3. If one has row_partitions, go with that dtype.
Args:
shape_x: A `DynamicRaggedShape`
shape_y: A `DynamicRaggedShape`
Returns:
A `DynamicRaggedShape`.
Raises:
ValueError: If `shape_x` and `shape_y` are not broadcast-compatible. | github-repos |
def download_folder(bucket_name, prefix, target, sagemaker_session):
    """Download everything under ``prefix`` in an S3 bucket to ``target``.

    Args:
        bucket_name (str): S3 bucket name.
        prefix (str): S3 prefix to download; may name a single object.
        target (str): local destination directory.
        sagemaker_session: session providing the boto S3 resource.
    """
    boto_session = sagemaker_session.boto_session
    s3 = boto_session.resource('s3')
    bucket = s3.Bucket(bucket_name)
    prefix = prefix.lstrip('/')
    objects = list(bucket.objects.filter(Prefix=prefix))
    # Shortcut: the prefix names a single object (not a "directory"), so
    # download just that file into target.
    if ((len(objects) > 0) and (objects[0].key == prefix) and (prefix[(- 1)] != '/')):
        s3.Object(bucket_name, prefix).download_file(os.path.join(target, os.path.basename(prefix)))
        return
    for obj_sum in bucket.objects.filter(Prefix=prefix):
        # Skip directory placeholder keys.
        if ((obj_sum.key != '') and (obj_sum.key[(- 1)] == '/')):
            continue
        obj = s3.Object(obj_sum.bucket_name, obj_sum.key)
        s3_relative_path = obj_sum.key[len(prefix):].lstrip('/')
        file_path = os.path.join(target, s3_relative_path)
        try:
            os.makedirs(os.path.dirname(file_path))
        except OSError as exc:
            # Directory already existing is fine; re-raise anything else.
            if (exc.errno != errno.EEXIST):
                raise
        obj.download_file(file_path) | Download a folder from S3 to a local path
Args:
bucket_name (str): S3 bucket name
prefix (str): S3 prefix within the bucket that will be downloaded. Can be a single file.
target (str): destination path where the downloaded items will be placed
sagemaker_session (:class:`sagemaker.session.Session`): a sagemaker session to interact with S3. | codesearchnet |
def zero_or_more(e, delimiter=None):
    """Build a PEG matcher that matches ``e`` zero or more times, with an
    optional ``delimiter`` expression between matches.

    Args:
        e: the expression to match repeatedly.
        delimiter: optional expression matched between consecutive ``e``
            matches; defaults to a zero-width match whose result is ignored.
    """
    if (delimiter is None):
        delimiter = (lambda s, grm, pos: (s, Ignore, (pos, pos)))
    def match_zero_or_more(s, grm=None, pos=0):
        start = pos
        # First match: failure here means zero matches, which still succeeds.
        try:
            (s, obj, span) = e(s, grm, pos)
            pos = span[1]
            data = ([] if (obj is Ignore) else [obj])
        except PegreError:
            return PegreResult(s, [], (pos, pos))
        # Subsequent matches: each requires delimiter + e; the first failure
        # ends the repetition without consuming it.
        try:
            while True:
                (s, obj, span) = delimiter(s, grm, pos)
                pos = span[1]
                if (obj is not Ignore):
                    data.append(obj)
                (s, obj, span) = e(s, grm, pos)
                pos = span[1]
                if (obj is not Ignore):
                    data.append(obj)
        except PegreError:
            pass
        return PegreResult(s, data, (start, pos))
    return match_zero_or_more | Create a PEG function to match zero or more expressions.
Args:
e: the expression to match
delimiter: an optional expression to match between the
primary *e* matches. | codesearchnet |
def run_cm(cm, time_scale):
    """Raise connectivity matrix ``cm`` to the ``time_scale`` power and clip
    entries back to 1 (path counts > 1 only indicate connectivity)."""
    cm = np.linalg.matrix_power(cm, time_scale)
    cm[cm > 1] = 1
    return cm | Iterate a connectivity matrix the specified number of steps.
Args:
cm (np.ndarray): A connectivity matrix.
time_scale (int): The number of steps to run.
Returns:
np.ndarray: The connectivity matrix at the new timescale. | juraj-google-style |
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding, data_format, expected, use_gpu, v2, use_negative_input=False):
    """Run ``_VerifyOneType`` for float32, float64 (skipped on ROCm) and
    float16 (only when supported), skipping unimplemented NCHW_VECT_C combos.

    Args mirror ``_VerifyOneType``; ``expected`` holds the expected outputs.
    """
    if data_format == 'NCHW_VECT_C':
        avg_pool_func = nn_ops.avg_pool
        tf_logging.info('pool_func=%s', pool_func)
        if pool_func == avg_pool_func:
            tf_logging.info('NCHW_VECT_C not yet implemented for avg_pool')
            return
        if self._isMaxPool(pool_func) and isinstance(padding, list):
            tf_logging.info('NCHW_VECT_C not yet implemented for max pool' + ' with explicit padding')
            return
    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding, data_format, dtypes.float32, expected, use_gpu, v2, use_negative_input)
    if not test.is_built_with_rocm():
        self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding, data_format, dtypes.float64, expected, use_gpu, v2, use_negative_input)
    if not use_gpu or test_util.GpuSupportsHalfMatMulAndConv():
        self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding, data_format, dtypes.float16, expected, use_gpu, v2, use_negative_input) | Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
v2: Whether to use v2 version.
    use_negative_input: If the input values should be negative. | github-repos |
def get_access_token(self, http=None, additional_claims=None):
    """Return an AccessTokenInfo; without extra claims the cached token is
    refreshed only when missing/expired, with extra claims a fresh token is
    always signed with maximum lifetime.

    Args:
        http: unused.
        additional_claims: dict of extra claims for the JWT payload.
    """
    if additional_claims is None:
        if self.access_token is None or self.access_token_expired:
            self.refresh(None)
        return client.AccessTokenInfo(
            access_token=self.access_token, expires_in=self._expires_in())
    else:
        token, unused_expiry = self._create_token(additional_claims)
        return client.AccessTokenInfo(
            access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS) | Create a signed jwt.
Args:
http: unused
additional_claims: dict, additional claims to add to
the payload of the JWT.
Returns:
An AccessTokenInfo with the signed jwt | juraj-google-style |
def operation_at(self, qubit: ops.Qid, moment_index: int) -> Optional[ops.Operation]:
    """Return the operation touching ``qubit`` in moment ``moment_index``, or
    None when there is none or the index is out of range."""
    if (not (0 <= moment_index < len(self._moments))):
        return None
    for op in self._moments[moment_index].operations:
        if (qubit in op.qubits):
            return op
    return None | Finds the operation on a qubit within a moment, if any.
Args:
qubit: The qubit to check for an operation on.
moment_index: The index of the moment to check for an operation
within. Allowed to be beyond the end of the circuit.
Returns:
None if there is no operation on the qubit at the given moment, or
else the operation. | codesearchnet |
def repl(optimize=True, persist=True):
    """Run an interactive read-eval-print loop over a Machine.

    Dot-commands (.code, .quit, ...) control the session; everything else is
    parsed, compiled and executed. Parse/compile/machine errors are printed
    without ending the session; EOF or Ctrl-C exits.

    Args:
        optimize: run inputted code through the optimizer before execution.
        persist: keep machine state (code and stacks) between lines.
    """
    print("Extra commands for the REPL:")
    print(".code - print code")
    print(".raw - print raw code")
    print(".quit - exit immediately")
    print(".reset - reset machine (IP and stacks)")
    print(".restart - create a clean, new machine")
    print(".clear - same as .restart")
    print(".stack - print data stack")
    print("")
    machine = Machine([])
    def match(s, *args):
        return any(map(lambda arg: s.strip()==arg, args))
    while True:
        try:
            # NOTE(review): raw_input is Python 2-only; confirm target version.
            source = raw_input("> ").strip()
            if source[0] == "." and len(source) > 1:
                if match(source, ".quit"):
                    return
                elif match(source, ".code"):
                    print_code(machine)
                elif match(source, ".raw"):
                    print(machine.code)
                elif match(source, ".reset"):
                    machine.reset()
                elif match(source, ".restart", ".clear"):
                    machine = Machine([])
                elif match(source, ".stack"):
                    print(machine.stack)
                else:
                    raise ParseError("Unknown command: %s" % source)
                continue
            code = compile(parse(source), silent=False, optimize=optimize)
            if not persist:
                machine.reset()
            machine.code += code
            machine.run()
        except EOFError:
            return
        except KeyboardInterrupt:
            return
        except ParseError as e:
            print("Parse error: %s" % e)
        except MachineError as e:
            print("Machine error: %s" % e)
        except CompileError as e:
            print("Compile error: %s" % e) | Starts a simple REPL for this machine.
Args:
optimize: Controls whether to run inputted code through the
optimizer.
persist: If True, the machine is not deleted after each line. | juraj-google-style |
def __init__(self, shard_context, shard_state, tstate):
    """Build a slice context from its shard context/state and transient state.

    Args:
        shard_context: ShardContext object.
        shard_state: model.ShardState providing slice id and retry count.
        tstate: model.TransientShardState.
    """
    self._tstate = tstate
    self.job_context = shard_context.job_context
    self.shard_context = shard_context
    # Slice number is 0-indexed.
    self.number = shard_state.slice_id
    # Attempt count starts at 1.
    self.attempt = shard_state.slice_retries + 1 | Init.
The signature of __init__ is subject to change.
Read only properties:
job_context: JobContext object.
shard_context: ShardContext object.
number: int. slice number. 0 indexed.
attempt: int. The current attempt at executing this slice.
starting at 1.
Args:
shard_context: map_job.JobConfig.
shard_state: model.ShardState.
tstate: model.TransientShardstate. | juraj-google-style |
def NewEvent(
    type: str, id: UUID = None, data: JsonDict = None, metadata: JsonDict = None
) -> NewEventData:
    """Build a NewEventData for ``type``; a random uuid4 is used when ``id`` is None."""
    return NewEventData(id or uuid4(), type, data, metadata) | Build the data structure for a new event.
Args:
type: An event type.
id: The uuid identifier for the event.
data: A dict containing data for the event. These data
must be json serializable.
metadata: A dict containing metadata about the event.
These must be json serializable. | juraj-google-style |
def single_qubit_matrix_to_phased_x_z(
        mat: np.ndarray,
        atol: float = 0
) -> List[ops.SingleQubitGate]:
    """Implements a single-qubit operation with a PhasedX and Z gate.

    If one of the gates isn't needed, it will be omitted.

    Args:
        mat: The 2x2 unitary matrix of the operation to implement.
        atol: A limit on the amount of error introduced by the
            construction.

    Returns:
        A list of gates that, when applied in order, perform the desired
        operation.
    """
    xy_turn, xy_phase_turn, total_z_turn = (
        _deconstruct_single_qubit_matrix_into_gate_turns(mat))

    phased_x = ops.PhasedXPowGate(exponent=2 * xy_turn,
                                  phase_exponent=2 * xy_phase_turn)
    z_gate = ops.Z**(2 * total_z_turn)

    # Drop any gate whose effect is negligible within the tolerance.
    kept = [gate for gate in (phased_x, z_gate)
            if protocols.trace_distance_bound(gate) > atol]

    # When both gates survive and the X/Y rotation is (within tolerance)
    # a half turn, the pair collapses into a single PhasedX gate.
    if len(kept) == 2 and abs(xy_turn) >= 0.5 - atol:
        merged = ops.PhasedXPowGate(
            phase_exponent=2 * xy_phase_turn + total_z_turn)
        return [merged]
    return kept
If one of the gates isn't needed, it will be omitted.
Args:
mat: The 2x2 unitary matrix of the operation to implement.
atol: A limit on the amount of error introduced by the
construction.
Returns:
A list of gates that, when applied in order, perform the desired
operation. | juraj-google-style |
def SummaryMetadata(self, run, tag):
    """Return the summary metadata for the given tag on the given run.

    Args:
        run: A string name of the run for which summary metadata is to be
            retrieved.
        tag: A string name of the tag whose summary metadata is to be
            retrieved.

    Raises:
        KeyError: If the run is not found, or the tag is not available for
            the given run.

    Returns:
        A `SummaryMetadata` protobuf.
    """
    # Delegate to the per-run accumulator; lookup errors propagate to callers.
    return self.GetAccumulator(run).SummaryMetadata(tag)
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf. | juraj-google-style |
def get_params(img, output_size):
    """Get parameters for ``crop`` for a random crop.

    Args:
        img (PIL Image): Image to be cropped.
        output_size (tuple): Expected output size (th, tw) of the crop.

    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
    """
    w, h = img.size
    th, tw = output_size
    # Crop covers the whole image: nothing random to choose.
    if w == tw and h == th:
        return 0, 0, h, w
    top = random.randint(0, h - th)
    left = random.randint(0, w - tw)
    return top, left, th, tw
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop. | codesearchnet |
def _flat_shapes(self):
    """Returns a list of `tf.TensorShape`s for the element tensor representation.

    Returns:
        A list of `tf.TensorShape`s for the element tensor representation.
    """
    spec = self.element_spec
    return structure.get_flat_tensor_shapes(spec)
Returns:
A list `tf.TensorShapes`s for the element tensor representation. | github-repos |
def variant_case(store, case_obj, variant_obj):
    """Pre-process case for the variant view.

    Adds information about alignment files from the case obj to the case,
    and attaches a region VCF path when the variant overlaps known genes.

    Args:
        store(scout.adapter.MongoAdapter)
        case_obj(scout.models.Case)
        variant_obj(scout.models.Variant)
    """
    case_obj['bam_files'] = []
    case_obj['mt_bams'] = []
    case_obj['bai_files'] = []
    case_obj['mt_bais'] = []
    case_obj['sample_names'] = []
    for individual in case_obj['individuals']:
        bam_path = individual.get('bam_file')
        mt_bam = individual.get('mt_bam')
        case_obj['sample_names'].append(individual.get('display_name'))
        if bam_path and os.path.exists(bam_path):
            case_obj['bam_files'].append(individual['bam_file'])
            case_obj['bai_files'].append(find_bai_file(individual['bam_file']))
        if mt_bam and os.path.exists(mt_bam):
            case_obj['mt_bams'].append(individual['mt_bam'])
            case_obj['mt_bais'].append(find_bai_file(individual['mt_bam']))
        else:
            # NOTE(review): this fires when the MT bam is missing, yet the
            # message talks about "bam file" -- confirm intended attachment.
            LOG.debug("%s: no bam file found", individual['individual_id'])
    try:
        genes = variant_obj.get('genes', [])
        if len(genes) == 1:
            hgnc_gene_obj = store.hgnc_gene(variant_obj['genes'][0]['hgnc_id'])
            if hgnc_gene_obj:
                vcf_path = store.get_region_vcf(case_obj, gene_obj=hgnc_gene_obj)
                case_obj['region_vcf_file'] = vcf_path
            else:
                case_obj['region_vcf_file'] = None
        elif len(genes) > 1:
            # Span the region covering every overlapped gene.
            chrom = variant_obj['genes'][0]['common']['chromosome']
            start = min(gene['common']['start'] for gene in variant_obj['genes'])
            end = max(gene['common']['end'] for gene in variant_obj['genes'])
            vcf_path = store.get_region_vcf(case_obj, chrom=chrom, start=start, end=end)
            case_obj['region_vcf_file'] = vcf_path
    except Exception:
        # Fix: the original ``except (SyntaxError, Exception)`` was redundant
        # (SyntaxError is a subclass of Exception). The region VCF is a
        # best-effort extra for the alignment view, so failures are logged
        # and swallowed deliberately.
        LOG.warning("skip VCF region for alignment view")
Adds information about files from case obj to variant
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant) | juraj-google-style |
def rotate(self, vector):
    """Rotate a 3D vector by the rotation stored in the Quaternion object.

    Params:
        vector: A 3-vector given as any ordered sequence of 3 real numbers
            (numpy array, list or tuple), or a Quaternion whose scalar part
            is 0 and whose vector part holds the 3-vector.

    Returns:
        The rotated vector, returned as the same type as the input.

    Raises:
        TypeError: if any vector element cannot be converted to a real number.
        ValueError: if `vector` cannot be interpreted as a 3-vector or a
            Quaternion object.
    """
    # Quaternion input: rotate it directly.
    if isinstance(vector, Quaternion):
        return self._rotate_quaternion(vector)
    # Wrap the plain 3-vector in a pure quaternion, rotate, then unwrap.
    rotated = self._rotate_quaternion(Quaternion(vector=vector)).vector
    # Mirror the input container type on the way out.
    if isinstance(vector, list):
        return list(rotated)
    if isinstance(vector, tuple):
        return tuple(rotated)
    return rotated
Params:
vector: A 3-vector specified as any ordered sequence of 3 real numbers corresponding to x, y, and z values.
Some types that are recognised are: numpy arrays, lists and tuples.
A 3-vector can also be represented by a Quaternion object whose scalar part is 0 and vector part is the required 3-vector.
Thus it is possible to call `Quaternion.rotate(q)` with another quaternion object as an input.
Returns:
The rotated vector returned as the same type it was specified at input.
Raises:
TypeError: if any of the vector elements cannot be converted to a real number.
ValueError: if `vector` cannot be interpreted as a 3-vector or a Quaternion object. | codesearchnet |
def groups_from_tag(self, group, tag_name, filters=None, params=None):
    """Yield each item produced by ``pivot_from_tag``.

    Args:
        group: group identifier, forwarded unchanged.
        tag_name: tag name, forwarded unchanged.
        filters: optional filters, forwarded unchanged.
        params: optional params, forwarded unchanged.
    """
    yield from self.pivot_from_tag(group, tag_name, filters=filters, params=params)
group:
tag_name:
filters:
params:
Return: | juraj-google-style |
def get_by_name(self, name):
    """Gets a SAN Manager by name.

    Args:
        name: Name of the SAN Manager

    Returns:
        dict: The first SAN Manager whose ``name`` matches, or None.
    """
    for san_manager in self._client.get_all():
        if san_manager['name'] == name:
            return san_manager
    return None
Args:
name: Name of the SAN Manager
Returns:
dict: SAN Manager. | juraj-google-style |
def get_adif_id(self, callsign, timestamp=timestamp_now):
    """Returns ADIF id of a callsign's country.

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        int: containing the country ADIF id

    Raises:
        KeyError: No Country found for callsign
    """
    lookup = self.get_all(callsign, timestamp)
    return lookup[const.ADIF]
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the country ADIF id
Raises:
KeyError: No Country found for callsign | codesearchnet |
def to_json_string(self, indent=None):
    """Convert the document to a JSON string.

    Args:
        indent (int or None, optional) : number of spaces to indent, or
            None to suppress all newlines and indentation (default: None)

    Returns:
        str
    """
    doc_json = {
        'title': self.title,
        'roots': {
            'root_ids': [root.id for root in self._roots],
            'references': references_json(self._all_models.values()),
        },
        'version': __version__,
    }
    return serialize_json(doc_json, indent=indent)
Args:
indent (int or None, optional) : number of spaces to indent, or
None to suppress all newlines and indentation (default: None)
Returns:
str | codesearchnet |
def initialize_means(data, clusters, k):
    """Initializes the M matrix given the data and a set of cluster labels.

    Cluster centers are set to the mean of each cluster; an empty cluster is
    re-seeded from a randomly chosen data point.

    Args:
        data (array): genes x cells
        clusters (array): 1d array of ints (0...k-1)
        k (int): number of clusters

    Returns:
        array: genes x k matrix of initial means.
    """
    init_w = np.zeros((data.shape[0], k))
    is_sparse = sparse.issparse(data)
    for i in range(k):
        members = data[:, clusters == i]
        if members.shape[1] == 0:
            # Empty cluster: fall back to a random data point (no eps added).
            point = np.random.randint(0, data.shape[1])
            if is_sparse:
                init_w[:, i] = data[:, point].toarray().flatten()
            else:
                init_w[:, i] = data[:, point].flatten()
        elif is_sparse:
            init_w[:, i] = np.array(members.mean(1)).flatten() + eps
        else:
            init_w[:, i] = members.mean(1) + eps
    return init_w
Cluster centers are set to the mean of each cluster.
Args:
data (array): genes x cells
clusters (array): 1d array of ints (0...k-1)
k (int): number of clusters | juraj-google-style |
def keys(self):
    """Return a list of all keys in the dictionary.

    Returns:
        list of str: [key1,key2,...,keyN]
    """
    # Fix: the original iterated ``hgetall(...).items()`` and discarded every
    # value; iterating the mapping directly yields just the field names.
    # NOTE(review): ``self.rdb.hkeys(self.session_hash)`` would avoid pulling
    # the values from the server entirely -- confirm rdb is a redis client.
    return [k.decode('utf-8') for k in self.rdb.hgetall(self.session_hash)]
Returns:
list of str: [key1,key2,...,keyN] | codesearchnet |
def _IsBase64(cls, s):
    """An imperfect but decent method for determining if a string is base64.

    A string counts as base64 when it survives a decode/encode round trip
    unchanged.

    Args:
        s: A string with the data to test.

    Returns:
        True if s is base64, else False.
    """
    try:
        decoded = base64.b64decode(s)
        reencoded = base64.b64encode(decoded).decode('utf-8')
    except (TypeError, binascii.Error):
        return False
    return reencoded == s
Args:
s: A string with the data to test.
Returns:
True if s is base64, else False. | codesearchnet |
def save_chkpt_vars(dic, path):
    """Save variables in dic to path.

    Args:
        dic: {name: value}
        path: save as npz if the name ends with '.npz', otherwise save as a
            checkpoint.
    """
    logger.info('Variables to save to {}:'.format(path))
    logger.info(pprint.pformat(sorted(dic.keys())))

    assert not path.endswith('.npy')
    if path.endswith('.npz'):
        np.savez_compressed(path, **dic)
        return
    # Build a throwaway graph holding one variable per entry, then let a
    # Saver write the checkpoint file.
    with tf.Graph().as_default(), tf.Session() as sess:
        for name, value in six.iteritems(dic):
            var_name = get_op_tensor_name(name)[0]
            _ = tf.Variable(name=var_name, initial_value=value)
        sess.run(tf.global_variables_initializer())
        tf.train.Saver().save(sess, path, write_meta_graph=False)
Args:
dic: {name: value}
path: save as npz if the name ends with '.npz', otherwise save as a checkpoint. | codesearchnet |
def read(self, *, level=0, alignment=1) -> bytes:
    """Read the content of the texture into a buffer.

    Keyword Args:
        level (int): The mipmap level.
        alignment (int): The byte alignment of the pixels.

    Returns:
        bytes
    """
    # Delegate to the low-level object.
    data = self.mglo.read(level, alignment)
    return data
Keyword Args:
level (int): The mipmap level.
alignment (int): The byte alignment of the pixels.
Returns:
bytes | codesearchnet |
def guess_base_branch():
    """Try to guess the base branch for the current branch.

    Do not trust this guess. git makes it pretty much impossible to guess
    the base branch reliably so this function implements few heuristics that
    will work on most common use cases but anything a bit crazy will probably
    trip this function.

    Returns:
        Optional[str]: The name of the base branch for the current branch if
            guessable or **None** if can't guess.
    """
    my_branch = current_branch(refresh=True).name
    curr = latest_commit()
    if (len(curr.branches) > 1):
        # The tip commit is already shared with other branches: if exactly
        # one other branch contains it, assume that one is the base.
        other = [x for x in curr.branches if (x != my_branch)]
        if (len(other) == 1):
            return other[0]
        return None
    else:
        # Walk back through ancestry while the commits still belong to our
        # branch, looking for the first commit shared with another branch.
        parent = curr
        while (parent and (my_branch in parent.branches)):
            curr = parent
            if (len(curr.branches) > 1):
                other = [x for x in curr.branches if (x != my_branch)]
                if (len(other) == 1):
                    return other[0]
                return None
            # Only follow parents that are still on our branch.
            parents = [p for p in curr.parents if (my_branch in p.branches)]
            num_parents = len(parents)
            if (num_parents > 2):
                # More than two candidate parents: ambiguous, give up.
                return None
            if (num_parents == 2):
                # Merge commit: follow the parent that is only on our branch.
                for p in parents:
                    if (p.branches == [my_branch]):
                        parent = p
                        break
            elif (num_parents == 1):
                parent = parents[0]
            elif (num_parents == 0):
                # Reached the root of this branch's history; stop walking.
                parent = None
    return None
Do not trust this guess. git makes it pretty much impossible to guess
the base branch reliably so this function implements a few heuristics that
will work on most common use cases but anything a bit crazy will probably
trip this function.
Returns:
Optional[str]: The name of the base branch for the current branch if
guessable or **None** if can't guess. | codesearchnet |
def set_zone(timezone):
    """Sets the timezone using the tzutil.

    Args:
        timezone (str): A valid timezone

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If invalid timezone is passed

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'
    """
    lowered = timezone.lower()
    # Accept either a Windows or a Unix timezone name; tzutil wants Windows.
    if lowered in mapper.win_to_unix:
        win_zone = timezone
    elif lowered in mapper.unix_to_win:
        win_zone = mapper.get_win(timezone)
    else:
        raise CommandExecutionError(
            'Invalid timezone passed: {0}'.format(timezone))

    res = __salt__['cmd.run_all'](['tzutil', '/s', win_zone],
                                  python_shell=False)
    if res['retcode']:
        raise CommandExecutionError(
            'tzutil encountered an error setting timezone: {0}'.format(timezone),
            info=res)
    return zone_compare(timezone)
Args:
timezone (str): A valid timezone
Returns:
bool: ``True`` if successful, otherwise ``False``
Raises:
CommandExecutionError: If invalid timezone is passed
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver' | codesearchnet |
def deregister(cls, name: str) -> None:
    """Deregisters a registered connection plugin by its name.

    Args:
        name: name of the connection plugin to deregister

    Raises:
        :obj:`nornir.core.exceptions.ConnectionPluginNotRegistered`
    """
    # Guard-clause form: remove when present, otherwise raise.
    if name in cls.available:
        cls.available.pop(name)
        return
    raise ConnectionPluginNotRegistered(
        f"Connection {name!r} is not registered"
    )
Args:
name: name of the connection plugin to deregister
Raises:
:obj:`nornir.core.exceptions.ConnectionPluginNotRegistered` | juraj-google-style |
def remove(self, repl_id):
    """remove replica set with kill members

    Args:
        repl_id - replica set identity

    Raises:
        KeyError: if ``repl_id`` is not in storage.
    """
    # NOTE(review): the original docstring claimed a True/False return, but
    # the function returns None -- confirm callers ignore the result.
    repl = self._storage.pop(repl_id)
    repl.cleanup()
    # Fix: the original ended with ``del(repl)``, which only deletes the
    # local binding right before the function returns -- a no-op, removed.
Args:
repl_id - replica set identity
return True if operation success otherwise False | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.