code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def update_target_state(self, value: str, force: bool=True) -> datetime:
value = value.lower()
if (not force):
current_state = self.current_state
if (current_state == 'unknown'):
raise RuntimeError("Unable to set target state when current state is 'unknown'")
allowed_target_s... | Set the target state.
Args:
value (str): New value for target state
force (bool): If true, ignore allowed transitions
Returns:
datetime, update timestamp
Raises:
RuntimeError, if it is not possible to currently set the target
state.
ValueError, if the specified target stat is not allowed. | codesearchnet |
def add_object_to_scope(self, obj):
if isinstance(obj, Computer):
self.add_object_to_path(obj, 'scope/computers')
elif isinstance(obj, ComputerGroup):
self.add_object_to_path(obj, 'scope/computer_groups')
elif isinstance(obj, Building):
self.add_object_to_path(obj, 'scope/buildings')... | Add an object to the appropriate scope block.
Args:
obj: JSSObject to add to scope. Accepted subclasses are:
Computer
ComputerGroup
Building
Department
Raises:
TypeError if invalid obj type is provided. | codesearchnet |
def _format_src_url(self, path, caller_system):
path = '%s/%s' % (self._endpoint, self.relpath(path))
if caller_system is not self:
try:
path = '%s?%s' % (path, self._storage_parameters['sas_token'])
except KeyError:
pass
... | Ensure path is absolute and use the correct URL format for use with
cross Azure storage account copy function.
Args:
path (str): Path or URL.
caller_system (pycosio.storage.azure._AzureBaseSystem subclass):
System calling this method (Can be another Azure system).
Returns:
str: URL. | juraj-google-style |
def get_temporary_scripts_path(self):
result = None
if (len(self.config.temporary_scripts_path) > 0):
if os.path.isdir(self.config.temporary_scripts_path):
result = self.config.temporary_scripts_path
return result | Get path for temporary scripts.
Returns:
str: path for temporary scripts or None if not set | codesearchnet |
def _execute(self, command, data=None, unpack=True):
if (not data):
data = {}
if (self.session_id is not None):
data.setdefault('session_id', self.session_id)
data = self._wrap_el(data)
res = self.remote_invoker.execute(command, data)
ret = WebDriverResult.from_object(res)
ret.ra... | Private method to execute command.
Args:
command(Command): The defined command.
data(dict): The uri variable and body.
uppack(bool): If unpack value from result.
Returns:
The unwrapped value field in the json response. | codesearchnet |
def add_functions(spec_dict: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
spec_dict['functions']['list'] = []
spec_dict['functions']['list_long'] = []
spec_dict['functions']['list_short'] = []
spec_dict['functions']['primary'] = {}
spec_dict['functions']['primary']['list_long'] = []
spec_dict['f... | Add function keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added function keys | codesearchnet |
def combine_first_two_dimensions(x):
ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0))
old_shape = x.get_shape().dims
a, b = old_shape[:2]
new_shape = [a * b if a and b else None] + old_shape[2:]
ret.set_shape(new_shape)
return ret | Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...] | juraj-google-style |
def _head(self, client_kwargs):
with _handle_oss_error():
bucket = self._get_bucket(client_kwargs)
if 'key' in client_kwargs:
return bucket.head_object(
key=client_kwargs['key']).headers
return bucket.ge... | Returns object HTTP header.
Args:
client_kwargs (dict): Client arguments.
Returns:
dict: HTTP header. | juraj-google-style |
def Process(self, parser_mediator, cache=None, database=None, **unused_kwargs):
if (cache is None):
raise ValueError('Missing cache value.')
if (database is None):
raise ValueError('Missing database value.')
super(SQLitePlugin, self).Process(parser_mediator)
for (query, callback_method) ... | Determine if this is the right plugin for this database.
This function takes a SQLiteDatabase object and compares the list
of required tables against the available tables in the database.
If all the tables defined in REQUIRED_TABLES are present in the
database then this plugin is considered to be the correct plugin
an... | codesearchnet |
def solve(self, print_solution=False):
self._cp_solver = cp_model.CpSolver()
status = self._cp_solver.Solve(self._model)
if status != cp_model.OPTIMAL:
if status == cp_model.FEASIBLE:
logging.warning("A potentially suboptimal solution was found.")
else:
logging.error("S... | Solves the current integer program and returns the computed layout.
Args:
print_solution: An optional boolean indicating whether to print the full
solution in human-readable format.
Returns:
The computed layout (as a string).
Raises:
SolverError: the internal solver could not find a solution, or the
solution found i... | juraj-google-style |
def __init__(self, cl_environments=None, compile_flags=None, double_precision=None):
super().__init__()
self._cl_environments = cl_environments
self._compile_flags = compile_flags
self._double_precision = double_precision | Updates the runtime settings.
Args:
cl_environments (list of CLEnvironment): the new CL environments we wish to use for future computations
compile_flags (list): the list of compile flags to use during analysis.
double_precision (boolean): if we compute in double precision or not | juraj-google-style |
def __init__(self, client_path, data, chunk_index, total_chunks, offset,
total_size):
self.client_path = client_path
self.data = data
self.offset = offset
self.total_size = total_size
self.chunk_index = chunk_index
self.total_chunks = total_chunks | Initializes StreamedFileChunk object.
Args:
client_path: db.ClientPath identifying the file.
data: bytes with chunk's contents.
chunk_index: Index of this chunk (relative to the sequence of chunks
corresponding to the file).
total_chunks: Total number of chunks corresponding to a given file.
offset: Offset of this chu... | juraj-google-style |
def tcp_ping(task: Task, ports: List[int], timeout: int=2, host: Optional[str]=None) -> Result:
if isinstance(ports, int):
ports = [ports]
if isinstance(ports, list):
if (not all((isinstance(port, int) for port in ports))):
raise ValueError("Invalid value for 'ports'")
else:
... | Tests connection to a tcp port and tries to establish a three way
handshake. To be used for network discovery or testing.
Arguments:
ports (list of int): tcp ports to ping
timeout (int, optional): defaults to 2
host (string, optional): defaults to ``hostname``
Returns:
Result object with the following attributes set... | codesearchnet |
def publishCombinedWebMap(self, maps_info, webmaps):
if self.securityhandler is None:
print ("Security handler required")
return
admin = None
map_results = None
map_info = None
operationalLayers = None
tableLayers = None
item = Non... | Publishes a combination of web maps.
Args:
maps_info (list): A list of JSON configuration combined web maps to publish.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`. | juraj-google-style |
def get_message(routing_key, properties, body):
if properties.headers is None:
_log.error(
"Message (body=%r) arrived without headers. " "A publisher is misbehaving!",
body,
)
properties.headers = {}
try:
MessageClass = get_class(properties.headers["... | Construct a Message instance given the routing key, the properties and the
body received from the AMQP broker.
Args:
routing_key (str): The AMQP routing key (will become the message topic)
properties (pika.BasicProperties): the AMQP properties
body (bytes): The encoded message body
Raises:
ValidationError: If Message... | juraj-google-style |
def get_random_distorted_bottlenecks(sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor):
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.ran... | Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distor... | codesearchnet |
def _compress_hextets(cls, hextets):
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon... | Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets... | juraj-google-style |
def oem(self):
buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
res = self._dll.JLINKARM_GetOEMString(ctypes.byref(buf))
if (res != 0):
raise errors.JLinkException('Failed to grab OEM string.')
oem = ctypes.string_at(buf).decode()
if (len(oem) == 0):
return None
return oem | Retrieves and returns the OEM string of the connected J-Link.
Args:
self (JLink): the ``JLink`` instance
Returns:
The string of the OEM. If this is an original SEGGER product, then
``None`` is returned instead.
Raises:
JLinkException: on hardware error. | codesearchnet |
def _kl_bernoulli_bernoulli(a, b, name=None):
with tf.name_scope(name or "kl_bernoulli_bernoulli"):
delta_probs0 = tf.nn.softplus(-b.logits) - tf.nn.softplus(-a.logits)
delta_probs1 = tf.nn.softplus(b.logits) - tf.nn.softplus(a.logits)
return (tf.sigmoid(a.logits) * delta_probs0
+ tf.sigmoi... | Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
Args:
a: instance of a Bernoulli distribution object.
b: instance of a Bernoulli distribution object.
name: (optional) Name to use for created operations.
default is "kl_bernoulli_bernoulli".
Returns:
Batchwise KL(a || b) | juraj-google-style |
async def remember_ticket(self, request, ticket):
session = (await get_session(request))
session[self.cookie_name] = ticket | Called to store the ticket data for a request.
Ticket data is stored in the aiohttp_session object
Args:
request: aiohttp Request object.
ticket: String like object representing the ticket to be stored. | codesearchnet |
def get_contacts(self, issue):
if (not issue.resource):
return []
account_contacts = issue.resource.account.contacts
try:
resource_owners = issue.resource.get_owner_emails()
if (type(resource_owners) is list):
for resource_owner in resource_owners:
account... | Returns a list of contacts for an issue
Args:
issue (:obj:`RequiredTagsIssue`): Issue record
Returns:
`list` of `dict` | codesearchnet |
def get_files_re(self, file_re, full_path=False, ignorecase=False):
try:
if ignorecase:
compiled_re = re.compile(file_re, re.I)
else:
compiled_re = re.compile(file_re)
except sre_constants.error:
logger.error('Failed to compile regex: {}.'.format(file_re))
... | Finds all files that match file_re and returns their list.
Doesn't return directories, only files.
Args:
file_re: raw string to match files against (gets compiled into re)
full_path: whether to match against full path inside the archive
or just the filenames
ignorecase: whether to ignore case when using the given re
R... | codesearchnet |
def GetScriptHashesForVerifying(self):
if (self.PrevHash.Data == bytearray(32)):
if (type(self.Script.VerificationScript) is bytes):
return [bytearray(self.Script.VerificationScript)]
elif (type(self.Script.VerificationScript) is bytearray):
return [self.Script.VerificationSc... | Get the script hash used for verification.
Raises:
Exception: if the verification script is invalid, or no header could be retrieved from the Blockchain.
Returns:
list: with a single UInt160 representing the next consensus node. | codesearchnet |
def list_from_file(filename, prefix='', offset=0, max_num=0):
cnt = 0
item_list = []
with open(filename, 'r') as f:
for _ in range(offset):
f.readline()
for line in f:
if max_num > 0 and cnt >= max_num:
break
item_list.append(prefix + ... | Load a text file and parse the content as a list of strings.
Args:
filename (str): Filename.
prefix (str): The prefix to be inserted to the begining of each item.
offset (int): The offset of lines.
max_num (int): The maximum number of lines to be read,
zeros and negatives mean no limitation.
Returns:
list[str]: A lis... | juraj-google-style |
def tf_next_step(self, x, iteration, conjugate, residual, squared_residual):
next_step = super(ConjugateGradient, self).tf_next_step(x, iteration, conjugate, residual, squared_residual)
return tf.logical_and(x=next_step, y=(squared_residual >= util.epsilon)) | Termination condition: max number of iterations, or residual sufficiently small.
Args:
x: Current solution estimate $x_t$.
iteration: Current iteration counter $t$.
conjugate: Current conjugate $c_t$.
residual: Current residual $r_t$.
squared_residual: Current squared residual $r_t^2$.
Returns:
True if another iterat... | juraj-google-style |
def partial_derivative_sigma(mu, sigma, low, high, data):
pd_sigma = np.sum(((- (1 / sigma)) + (((data - mu) ** 2) / (sigma ** 3))))
pd_sigma -= (len(data) * ((((low - mu) * norm.pdf(low, mu, sigma)) - ((high - mu) * norm.pdf(high, mu, sigma))) / (sigma * (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma))))... | The partial derivative with respect to the standard deviation.
Args:
mu (float): the mean of the truncated normal
sigma (float): the std of the truncated normal
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimension list of data points for which we want to ca... | codesearchnet |
def assemble(self, ops):
return pwnypack.asm.asm(self.compile(ops), target=self.target) | Assemble a list of operations into executable code.
Arguments:
ops(list): A list of shellcode operations.
Returns:
bytes: The executable code that implements the shellcode. | juraj-google-style |
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
egg_project_name = pkg_resources.to_filename(project_name)
req = '{}@{}
if subdir:
req += '&subdirectory={}'.format(subdir)
return req | Return the URL for a VCS requirement.
Args:
repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
project_name: the (unescaped) project name. | juraj-google-style |
def get_max_atten(self):
return self.attenuation_device.max_atten | Gets the max attenuation supported by the Attenuator.
Returns:
A float that is the max attenuation value. | github-repos |
def UpdateClass(self, class_name, gtfs_class):
if class_name not in self._class_mapping:
raise problems.NonexistentMapping(class_name)
self._class_mapping[class_name] = gtfs_class | Updates an entry in the list of known classes.
Args:
class_name: A string with the class name that is to be updated.
gtfs_class: The new class
Raises:
NonexistentMapping if there is no class with the specified class_name. | juraj-google-style |
def feedforward(inputs,
num_units,
scope="multihead_attention"):
with tf.variable_scope(scope):
params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
"activation": tf.nn.relu, "use_bias": True}
outputs = tf.layers.conv1... | Point-wise feed forward net.
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A 3d tensor with the same shape and dtype as inputs | juraj-google-style |
def _scale_boxes(boxes, target_sizes):
if isinstance(target_sizes, (list, tuple)):
image_height = torch.tensor([i[0] for i in target_sizes])
image_width = torch.tensor([i[1] for i in target_sizes])
elif isinstance(target_sizes, torch.Tensor):
image_height, image_width = target_sizes.unbi... | Scale batch of bounding boxes to the target sizes.
Args:
boxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`):
Bounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format.
target_sizes (`List[Tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`):
Target sizes to scale the boxes to.... | github-repos |
def Lookup(self, keywords, start_time=FIRST_TIMESTAMP, end_time=LAST_TIMESTAMP, last_seen_map=None):
posting_lists = self.ReadPostingLists(keywords, start_time=start_time, end_time=end_time, last_seen_map=last_seen_map)
results = list(itervalues(posting_lists))
relevant_set = results[0]
for hits in resu... | Finds objects associated with keywords.
Find the names related to all keywords.
Args:
keywords: A collection of keywords that we are interested in.
start_time: Only considers keywords added at or after this point in time.
end_time: Only considers keywords at or before this point in time.
last_seen_map: If present, is... | codesearchnet |
def get_attribute(self, node, obj, name, valself=None):
obj = abstract_utils.unwrap_final(obj)
special_attribute = obj.get_special_attribute(node, name, valself)
if special_attribute is not None:
return (node, special_attribute)
if isinstance(obj, abstract.Function):
if name == '__get__'... | Get the named attribute from the given object.
Args:
node: The current CFG node.
obj: The object.
name: The name of the attribute to retrieve.
valself: A cfg.Binding to a self reference to include in the attribute's
origins. If obj is an abstract.Class, valself can be a binding to:
* an instance of obj - obj will be t... | github-repos |
def construct_gene_object(ensembl, transcript_id):
(chrom, start, end, strand, genomic_sequence) = ensembl.get_genomic_seq_for_transcript(transcript_id, expand=10)
cds_sequence = ensembl.get_cds_seq_for_transcript(transcript_id)
cds_ranges = ensembl.get_cds_ranges_for_transcript(tra... | creates an Transcript object for a gene from ensembl databases
Args:
ensembl: EnsemblRequest object to request data from ensembl
transcript_id: string for an Ensembl transcript ID
Returns:
a Transcript object, containing transcript coordinates and gene and
transcript sequence.
Raises:
ValueError if CDS from genomic ... | juraj-google-style |
def __init__(self, file_system, tsk_attribute):
super(TSKDataStream, self).__init__()
self._file_system = file_system
self._tsk_attribute = tsk_attribute | Initializes a data stream.
Args:
file_system (TSKFileSystem): file system.
tsk_attribute (pytsk3.Attribute): TSK attribute. | juraj-google-style |
def _call_method_from_namespace(obj, method_name, namespace):
method = getattr(obj, method_name)
method_parser = method.parser
arg_names = _get_args_name_from_parser(method_parser)
if (method_name == '__init__'):
return _call(obj, arg_names, namespace)
return _call(method, arg_names, namespa... | Call the method, retrieved from obj, with the correct arguments via
the namespace
Args:
obj: any kind of object
method_name: method to be called
namespace: an argparse.Namespace object containing parsed command
line arguments | codesearchnet |
def append_to_history(self, filename, command, go_to_eof):
if not is_text_string(filename):
filename = to_text_string(filename.toUtf8(), 'utf-8')
command = to_text_string(command)
index = self.filenames.index(filename)
self.editors[index].append(command)
if ... | Append an entry to history filename.
Args:
filename (str): file to be updated in a new tab.
command (str): line to be added.
go_to_eof (bool): scroll to the end of file. | juraj-google-style |
def _CompressionSizeDelta(self, records, options_a, options_b):
fn_a = self._WriteRecordsToFile(records, 'tfrecord_a', options=options_a)
test_a = list(tf_record.tf_record_iterator(fn_a, options=options_a))
self.assertEqual(records, test_a, options_a)
fn_b = self._WriteRecordsToFile(records, 'tfrecord_b... | Validate compression with options_a and options_b and return size delta.
Compress records with options_a and options_b. Uncompress both compressed
files and assert that the contents match the original records. Finally
calculate how much smaller the file compressed with options_a was than the
file compressed with optio... | github-repos |
def init_datapackage(resource_paths):
dp = datapackage.Package({'name': 'change-me', 'schema': 'tabular-data-package'})
for path in resource_paths:
dp.infer(path)
return dp | Create tabular data package with resources.
It will also infer the tabular resources' schemas.
Args:
resource_paths (List[str]): Paths to the data package resources.
Returns:
datapackage.Package: The data package. | codesearchnet |
def process_test_logs(name, test_name, test_args, benchmark_type, start_time, run_time, log_files):
results = test_log_pb2.TestResults()
results.name = name
results.target = test_name
results.start_time = start_time
results.run_time = run_time
results.benchmark_type = test_log_pb2.TestResults.Be... | Gather test information and put it in a TestResults proto.
Args:
name: Benchmark target identifier.
test_name: A unique bazel target, e.g. "//path/to:test"
test_args: A string containing all arguments to run the target with.
benchmark_type: A string representing the BenchmarkType enum; the
benchmark type for this targ... | github-repos |
def serialize_data(data, compression=False, encryption=False, public_key=None):
message = json.dumps(data)
if compression:
message = zlib.compress(message)
message = binascii.b2a_base64(message)
if (encryption and public_key):
message = encryption.encrypt(message, public_key)
enc... | Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (b... | codesearchnet |
def __init__(self, raw_string, bow=True):
self.raw = raw_string
self.as_list = list(self.raw)
self.as_np = np.array(self.as_list)
self.string_start = np.arange(len(self.raw))
vocab = {}
self.inverse_vocab = []
self.positions = []
self.bow = bow
... | Initializer.
Args:
raw_string: string with raw text in it
bow: if True, a char is the same everywhere in the text - i.e. we
will index multiple occurrences of the same character. If False,
order matters, so that the same word will have different ids
according to position. | juraj-google-style |
def compile_intermediate_cpfs(self, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[Noise]=None) -> List[CPFPair]:
interm_fluents = []
with self.graph.as_default():
with tf.name_scope('intermediate_cpfs'):
for cpf in self.rddl.domain.intermediate_cpfs:
... | Compiles the intermediate fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. | codesearchnet |
def rename_document(self, did, name):
payload = {
'name': name
}
return self._api.request('post', '/api/documents/' + did, body=payload) | Renames the specified document.
Args:
- did (str): Document ID
- name (str): New document name
Returns:
- requests.Response: Onshape response data | juraj-google-style |
def get_page_artid_for_publication_info(publication_info, separator):
if 'artid' in publication_info:
return publication_info['artid']
elif 'page_start' in publication_info and 'page_end' in publication_info:
page_start = publication_info['page_start']
page_... | Return the page range or the article id of a publication_info entry.
Args:
publication_info(dict): a publication_info field entry of a record
separator(basestring): optional page range symbol, defaults to a single dash
Returns:
string: the page range or the article id of the record.
Examples:
>>> publication_info = ... | juraj-google-style |
def generate_examples(options):
_prepare_dir(options)
out = options.zip_to_output
if options.multi_gen_state:
test_name = options.multi_gen_state.test_name
else:
test_name = re.sub('(_(|with-flex|forward-compat|edgetpu|mlir-quant))?(_xnnpack)?\\.zip$', '', out, count=1)
test_function... | Generate examples for a test set.
Args:
options: Options containing information to generate examples.
Raises:
RuntimeError: if the test function cannot be found. | github-repos |
def __contains__(self, item):
try:
_libexec('merkle_db_contains', self.pointer,
item.encode())
return True
except KeyError:
return False | Does the tree contain an address.
Args:
item (str): An address.
Returns:
(bool): True if it does contain, False otherwise. | juraj-google-style |
def from_row_starts(cls, row_starts, nvals, validate=True, dtype=None, dtype_hint=None):
if not isinstance(validate, bool):
raise TypeError('validate must have type bool')
with ops.name_scope(None, 'RowPartitionFromRowStarts', [row_starts]):
row_starts = cls._convert_row_partition(row_starts, 'r... | Creates a `RowPartition` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(concat([row_starts, nvals], axis=0))`.
Args:
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
nvals: A scalar tens... | github-repos |
def impersonate(self, name=None, lifetime=None, mechs=None, usage='initiate'):
if (rcred_s4u is None):
raise NotImplementedError('Your GSSAPI implementation does not have support for S4U')
res = rcred_s4u.acquire_cred_impersonate_name(self, name, lifetime, mechs, usage)
return type(self)(base=res.cr... | Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :... | codesearchnet |
def update_state(world):
world_size = len(world)
def wrap(index):
return index % world_size
for x in range(world_size):
for y in range(world_size):
if not world[x][y].allow_change.get():
continue
live_neighbor_count = sum(... | Increment the world state, determining which cells live, die, or appear.
Args:
world (list[list]): A square matrix of cells
Returns: None | juraj-google-style |
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
if block_type == 'block35':
branch_0 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(branch_1, 32, 3)
branch_2 = conv2d_bn(x, 32, 1)
branch_2 = conv2d_bn(branch_2, 48, 3)... | Adds an Inception-ResNet block.
Args:
x: input tensor.
scale: scaling factor to scale the residuals
(i.e., the output of passing `x` through an inception module)
before adding them to the shortcut
branch. Let `r` be the output from the residual branch,
the output of this block will be `x + scale * r`.
block_type: `'bl... | github-repos |
def diffusion_mds(means, weights, d, diffusion_rounds=10):
for i in range(diffusion_rounds):
weights = weights*weights
weights = weights/weights.sum(0)
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells) | juraj-google-style |
def ParseInteger(text, is_signed=False, is_long=False):
try:
if is_long:
result = long(text, 0)
else:
result = int(text, 0)
except ValueError:
raise ValueError(("Couldn't parse integer: %s" % text))
checker = _INTEGER_CHECKERS[((2 * int(is_long)) + int(is_sign... | Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer. | codesearchnet |
def get_dict(self, name, default=None):
if (name not in self):
if (default is not None):
return default
raise EnvironmentError.not_found(self._prefix, name)
return dict(**self.get(name)) | Retrieves an environment variable value as a dictionary.
Args:
name (str): The case-insensitive, unprefixed variable name.
default: If provided, a default value will be returned
instead of throwing ``EnvironmentError``.
Returns:
dict: The environment variable's value as a ``dict``.
Raises:
EnvironmentError: If the e... | codesearchnet |
def __init__(self, flow, **kwargs):
self.flow = flow
self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200) | Initialize the object
Args:
flow: :class:`Flow` object
max_njobs_inqueue: The launcher will stop submitting jobs when the
number of jobs in the queue is >= Max number of jobs | juraj-google-style |
def translate_array(self, string, language, level=3, retdata=False):
language = language.lower()
assert (self.is_built_in(language) or (language in self.outer_templates)), (('Sorry, ' + language) + ' is not a supported language.')
data = phpserialize.loads(bytes(string, 'utf-8'), array_hook=list, decode_str... | Unserializes a serialized php array and prints it to
the console as a data structure in the specified language.
Used to translate or convert a php array into a data structure
in another language. Currently supports, PHP, Python, Javascript,
and JSON.
Args:
string: a string of serialized php
language: a string represe... | codesearchnet |
def quad_2d(width, height, xpos=0.0, ypos=0.0) -> VAO:
pos = numpy.array([(xpos - (width / 2.0)), (ypos + (height / 2.0)), 0.0, (xpos - (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos + (width / 2.0)), (ypos - (height / 2.0)), 0.0, (xpos - (width / 2.0)), (ypos + (height / 2.0)), 0.0, (xpos + (width / 2.0)), (y... | Creates a 2D quad VAO using 2 triangles with normals and texture coordinates.
Args:
width (float): Width of the quad
height (float): Height of the quad
Keyword Args:
xpos (float): Center position x
ypos (float): Center position y
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance. | codesearchnet |
def _rot90_4D(images, k, name_scope):
def _rot90():
return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3])
def _rot180():
return array_ops.reverse_v2(images, [1, 2])
def _rot270():
return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2])
... | Rotate batch of images counter-clockwise by 90 degrees `k` times.
Args:
images: 4-D Tensor of shape `[height, width, channels]`.
k: A scalar integer. The number of times the images are rotated by 90
degrees.
name_scope: A valid TensorFlow name scope.
Returns:
A 4-D `Tensor` of the same type and shape as `images`. | github-repos |
def ParseInteger(text, is_signed=False, is_long=False):
try:
if is_long:
result = long(text, 0)
else:
result = int(text, 0)
except ValueError:
raise ValueError('Couldn\'t parse integer: %s' % text)
checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
ch... | Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer. | juraj-google-style |
def reset(target, containers=None, config=None):
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config) | Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will... | github-repos |
def get_contract_data(self, contract_name):
contract_data_path = (self.output_dir + '/{0}.json'.format(contract_name))
with open(contract_data_path, 'r') as contract_data_file:
contract_data = json.load(contract_data_file)
abi = contract_data['abi']
bytecode = contract_data['evm']['bytecode']['o... | Returns the contract data for a given contract
Args:
contract_name (str): Name of the contract to return.
Returns:
str, str: ABI and bytecode of the contract | codesearchnet |
def trainable_variables(scope=None):
  """Returns all variables created with `trainable=True`.

  Args:
    scope: Optional scope filter forwarded to `ops.get_collection`.

  Returns:
    The contents of the `GraphKeys.TRAINABLE_VARIABLES` collection.
  """
  collection_key = ops.GraphKeys.TRAINABLE_VARIABLES
  return ops.get_collection(collection_key, scope)
When passed `trainable=True`, the `Variable()` constructor automatically
adds new variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the
contents of that collection.
@compatibility(TF2)
Not compatible with eager exe... | github-repos |
def __init__(self, dependency_name, is_upstream=False, optional=False):
    """Constructor for `Extension`.

    Args:
        dependency_name: str, name of the dependency this extension is for.
        is_upstream: bool, whether the dependency is upstream.
        optional: bool, whether this extension is optional.
    """
    # Plain attribute storage; no validation is performed here.
    self.optional = optional
    self.is_upstream = is_upstream
    self.dependency_name = dependency_name
Args:
dependency_name: str, see `ExtDependency.dependency_name`
is_upstream: bool, see `ExtDependency.is_upstream` | juraj-google-style |
def to_json(self, variables=None):
variables_to_resolve = []
if variables:
for (key, value) in variables.items():
variables_to_resolve.append(Variable(key, value))
for k in self.get_parameter_definitions():
if ((not variables) or (k not in variables)):
variables_to_re... | Render the blueprint and return the template in json form.
Args:
variables (dict):
Optional dictionary providing/overriding variable values.
Returns:
str: the rendered CFN JSON template | codesearchnet |
def _seconds_have_elapsed(token, num_seconds):
    """Tests if `num_seconds` have passed since `token` was last recorded.

    Always returns True on the first call for a given token, and records
    the current timer value whenever it returns True.

    Not strictly thread-safe: concurrent callers may race on the shared
    `_log_timer_per_token` dict. Accuracy depends on the resolution of
    `timeit.default_timer()`.

    Args:
        token: Hashable key identifying the call site being rate limited.
        num_seconds: Minimum interval, in seconds, between True results.

    Returns:
        bool: True if at least `num_seconds` elapsed (or on the first call).
    """
    current = timeit.default_timer()
    previous = _log_timer_per_token.get(token, None)
    if previous is not None and (current - previous) < num_seconds:
        return False
    # First sighting of this token, or enough time has passed: reset the clock.
    _log_timer_per_token[token] = current
    return True
Not strictly thread-safe - may log with the wrong frequency if called
concurrently from multiple threads. Accuracy depends on resolution of
'timeit.default_timer()'.
Always returns True on the first call for a given 'token'.
Args:
token: The token for w... | codesearchnet |
def log_every_n(level, msg, n, *args):
    """Logs 'msg % args' at level 'level' once per 'n' calls from a site.

    Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe.

    Args:
        level: int, the absl logging level at which to log.
        msg: str, the message to be logged.
        n: int, the number of calls between emitted log lines.
        *args: The args to be substituted into the message.
    """
    caller_key = get_absl_logger().findCaller()
    call_count = _get_next_log_count_per_token(caller_key)
    # Emit only when the per-site counter is a multiple of n.
    log_if(level, msg, call_count % n == 0, *args)
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n: int, the number of times this should be called before it is logged.
*args: The args to be substi... | juraj-google-style |
def parse_outputtrans(path_dir):
run_type = None
warning = None
efermi = None
gap = None
doping_levels = []
with open(os.path.join(path_dir, "boltztrap.outputtrans"), 'r') \
as f:
for line in f:
if "WARNING" in line:
... | Parses .outputtrans file
Args:
path_dir: dir containing boltztrap.outputtrans
Returns:
tuple - (run_type, warning, efermi, gap, doping_levels) | juraj-google-style |
def ws010(self, value=None):
    """Set IDD Field `ws010`.

    Wind speed corresponding to 1.0% annual cumulative frequency of
    occurrence. Unit: m/s.

    Args:
        value (float): New value for IDD Field `ws010`, or None for a
            missing value (stored without being checked).

    Raises:
        ValueError: If `value` is not None and cannot be converted to float.
    """
    if value is None:
        self._ws010 = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float for field `ws010`'.format(value))
    self._ws010 = converted
Wind speed corresponding to 1.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `ws010`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid ... | codesearchnet |
def patch_addContext(self, patch, text):
if (len(text) == 0):
return
pattern = text[patch.start2:(patch.start2 + patch.length1)]
padding = 0
while ((text.find(pattern) != text.rfind(pattern)) and ((self.Match_MaxBits == 0) or (len(pattern) < ((self.Match_MaxBits - self.Patch_Margin) - self.Patch... | Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text. | codesearchnet |
def variant(self, case_id, variant_id):
variant_id = int(variant_id)
gemini_query = "SELECT * from variants WHERE variant_id = {0}".format(
variant_id
)
individuals = []
case_obj = self.case(case_id)
for individual in case_obj.indiv... | Return a specific variant.
We solve this by building a gemini query and send it to _variants
Args:
case_id (str): Path to a gemini database
variant_id (int): A gemini variant id
Returns:
variant_obj (dict): A puzzle variant | juraj-google-style |
def get_fixture(self, fixture_id, head2head=None):
filters = []
if ((head2head is not None) and (int(head2head) > 0)):
self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.')
filters.append(self.__createFilter('head2head', head2head))
else:
self.logger.debug(f'G... | Loads a single fixture.
Args:
* fixture_id (str): the id of the fixture
* head2head (int, optional): load the previous n fixture of the two teams
Returns:
* :obj: json: the fixture-json | codesearchnet |
def calc_limits(data, dist=None, padding=0.25):
dmin = (sys.float_info.max if (dist is None) else dist.get('min', sys.float_info.max))
dmax = (sys.float_info.min if (dist is None) else dist.get('max', sys.float_info.min))
_min = min(min(data), dmin)
_max = max(max(data), dmax)
padding = (padding * (... | Calculate a suitable range for a histogram
Returns:
tuple of (min, max) | codesearchnet |
def GetUsernameForPath(self, path):
path = path.lower()
user_accounts = self._user_accounts.get(self.CURRENT_SESSION, {})
for user_account in iter(user_accounts.values()):
if (not user_account.user_directory):
continue
user_directory = user_account.user_directory.lower()
... | Retrieves a username for a specific path.
This is determining if a specific path is within a user's directory and
returning the username of the user if so.
Args:
path (str): path.
Returns:
str: username or None if the path does not appear to be within a user's
directory. | codesearchnet |
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) - events consumed: {2:d} - running: '
'{3!s}\n').format(
worker_status.identifier, worker_status.pid,
w... | Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status. | juraj-google-style |
def get_catalog_courses(self, catalog_id):
    """Return the courses included in a single course catalog by ID.

    Args:
        catalog_id (int): The catalog ID to retrieve courses for.

    Returns:
        list: Courses of the catalog, or [] when the endpoint yields none.
    """
    endpoint = self.CATALOGS_COURSES_ENDPOINT.format(catalog_id)
    return self._load_data(endpoint, default=[])
Args:
catalog_id (int): The catalog ID we want to retrieve.
Returns:
list: Courses of the catalog in question | juraj-google-style |
def load(self, email, master_token, android_id):
    """Authenticate to Google with a previously obtained master token.

    Args:
        email (str): The account to use.
        master_token (str): The master token.
        android_id (str): An identifier for this client.

    Returns:
        bool: Always True on success.

    Raises:
        LoginException: NOTE(review): presumably propagated from `refresh`
            if login fails — confirm against the `refresh` implementation.
    """
    self._master_token = master_token
    self._android_id = android_id
    self._email = email
    # Pull fresh state now that the credentials are in place.
    self.refresh()
    return True
Args:
email (str): The account to use.
master_token (str): The master token.
android_id (str): An identifier for this client.
Raises:
LoginException: If there was a problem logging in. | juraj-google-style |
def create_graph_from_data(self, data, **kwargs):
    """Apply causal discovery on observational data using CCDr.

    Args:
        data (pandas.DataFrame): DataFrame containing the data.
        **kwargs: Accepted for interface compatibility; not used here.

    Returns:
        networkx.DiGraph: Solution given by the CCDr algorithm, with node
        labels mapped from column indices to column names.
    """
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    raw_graph = self._run_ccdr(data, verbose=self.verbose)
    index_to_name = dict(enumerate(data.columns))
    return nx.relabel_nodes(nx.DiGraph(raw_graph), index_to_name)
Args:
data (pandas.DataFrame): DataFrame containing the data
Returns:
networkx.DiGraph: Solution given by the CCDR algorithm. | codesearchnet |
def set_nodes_vlan(site, nodes, interface, vlan_id):
def _to_network_address(host):
'Translate a host to a network address\n e.g:\n paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr\n '
splitted = host.split('.')
splitted[0] = ((splitted[0] + '-') + ... | Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on the node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan | codesearchnet |
def get_is_group_member(self, grp_name, user):
    """Check if the given user is a member of the named group.

    Note that a group maintainer is not considered a member unless the
    user is also explicitly added as a member.

    Args:
        grp_name (string): Name of the group.
        user (string): User of interest.

    Returns:
        (bool): False if the user is not a member.
    """
    service = self.project_service
    # Authenticate the project service before querying membership.
    service.set_auth(self._token_project)
    return service.get_is_group_member(grp_name, user)
Note that a group maintainer is not considered a member unless the
user is also explicitly added as a member.
Args:
name (string): Name of group.
user_name (string): User of interest.
Returns:
(bool): False if user not a member. | juraj-google-style |
def __init__(self, latitude, longitude, time, status, mode=None):
    """Initialise a new ``LoranPosition`` object.

    Args:
        latitude (float): Fix's latitude
        longitude (float): Fix's longitude
        time (datetime.time): Time the fix was taken
        status (bool): Whether the data is active
        mode (str): Type of reading
    """
    # Latitude/longitude handling is delegated to the parent class.
    super(LoranPosition, self).__init__(latitude, longitude)
    self.mode = mode
    self.status = status
    self.time = time
Args:
latitude (float): Fix's latitude
longitude (float): Fix's longitude
time (datetime.time): Time the fix was taken
status (bool): Whether the data is active
mode (str): Type of reading | juraj-google-style |
def learn(self, state_key, limit=1000):
self.t = 1
while self.t <= limit:
next_action_list = self.extract_possible_actions(state_key)
if len(next_action_list):
action_key = self.select_action(
state_key=state_key,
n... | Learning and searching the optimal solution.
Args:
state_key: Initial state.
limit: The maximum number of iterative updates based on value iteration algorithms. | juraj-google-style |
def fn(x: int, y: str) -> int:
    """Test function.

    Args:
        x: The input, returned unchanged.
        y: Also the input; not used by the body.
    """
    del y  # explicitly unused
    return x
Args:
x: The input
y: Also the input | github-repos |
def _calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight):
per_example_aggregation_loss = _calculate_aggregation_loss_known(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num... | Calculates the aggregation loss per example.
Args:
logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
Logits per aggregation operation.
aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
A mask set to 1 for examples that should use aggregation functions.
aggregation_... | github-repos |
def eigenvalues(df):
    """Returns a pandas Series with eigenvalues of the correlation matrix.

    Args:
        df: pandas DataFrame with columns to run diagnostics on.

    Returns:
        pandas.Series: Eigenvalues of the column correlation matrix,
        indexed by the DataFrame's column labels.
    """
    # rowvar=0 treats each column as one variable when building the matrix.
    correlation = np.corrcoef(df, rowvar=0)
    spectrum = np.linalg.eigvals(correlation)
    return pd.Series(spectrum, df.columns, name='Eigenvalue')
Args:
df: pandas DataFrame with columns to run diagnostics on | codesearchnet |
class Wrapper(Layer):
def __init__(self, layer, **kwargs):
try:
assert isinstance(layer, Layer)
except Exception:
raise ValueError(f"Layer {layer} supplied to Wrapper isn't a supported layer type. Please ensure wrapped layer is a valid Keras layer.")
super().__init__... | Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` layers.
Args:
layer: The layer to be wrapped. | github-repos |
def eval(self, expr):
    """Evaluate an expression, enforcing nesting and step limits.

    This does **not** add its argument (or its result) as an element of
    the environment; that is the responsibility of the caller that created
    the object (e.g. via ``Environment.rec_new``).

    Args:
        expr: Expression object exposing an ``eval(env)`` method.

    Returns:
        The result of ``expr.eval(self)``.

    Raises:
        LimitationError: If the nesting depth or total step budget is
            exceeded.
    """
    if self.depth >= self.max_depth:
        raise LimitationError('too much nesting')
    if self.steps >= self.max_steps:
        raise LimitationError('too many steps')
    self.depth += 1
    self.steps += 1
    try:
        return expr.eval(self)
    finally:
        # Restore the nesting depth even when evaluation raises, so one
        # failed expression does not permanently consume depth budget.
        # (The step counter is intentionally not rolled back: work done
        # still counts against the step limit.)
        self.depth -= 1
This does **not** add its argument (or its result) as an element of me!
That is the responsibility of the code that created the object. This
means that you need to :meth:`Environment.rec_new` any expression you
get from user input before evaluating it.
This, and any wrappers around it, are the... | codesearchnet |
def enroll_users_in_course(cls, enterprise_customer, course_id, course_mode, emails):
(existing_users, unregistered_emails) = cls.get_users_by_email(emails)
successes = []
pending = []
failures = []
for user in existing_users:
succeeded = cls.enroll_user(enterprise_customer, user, course_mod... | Enroll existing users in a course, and create a pending enrollment for nonexisting users.
Args:
enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment
course_id (str): The unique identifier of the course in which we're enrolling
course_mode (str): The mode with which we're enrolling in the cour... | codesearchnet |
def get_all_publications(return_namedtuples=True):
sources = [ben_cz.get_publications, grada_cz.get_publications, cpress_cz.get_publications, zonerpress_cz.get_publications]
publications = []
for source in sources:
publications.extend(filters.filter_publications(source()))
if return_namedtuples:... | Get list publications from all available source.
Args:
return_namedtuples (bool, default True): Convert :class:`.Publication`
structures to namedtuples (used in AMQP
communication).
Returns:
list: List of :class:`.Publication` structures converted to namedtuple. | codesearchnet |
def FoldValue(self, value):
if value is False and self._data_type_definition.false_value is not None:
return self._data_type_definition.false_value
if value is True and self._data_type_definition.true_value is not None:
return self._data_type_definition.true_value
raise ValueError('No mat... | Folds the data type into a value.
Args:
value (object): value.
Returns:
object: folded value.
Raises:
ValueError: if the data type definition cannot be folded into the value. | juraj-google-style |
def __init__(self, email, password):
    """Create the Trackr API interface object.

    Authenticates immediately and then loads the initial state, so
    construction performs API calls.

    Args:
        email (str): Trackr account email address.
        password (str): Trackr account password.
    """
    self.email = email
    self.password = password
    # Session bookkeeping, populated by authenticate() and later API calls.
    self.token = None
    self.last_api_call = None
    self.state = []
    self.authenticate()
    self.update_state_from_api()
Args:
email (str): Trackr account email address.
password (str): Trackrr account password. | juraj-google-style |
def update(self, *args, **kwargs):
    """Update ConfigMap from mapping/iterable.

    If a key exists its entry is updated, otherwise it is added.
    Positional pairs are applied first, then keyword arguments.

    Args:
        *args: Variable length argument list. A valid argument is a
            two-item tuple/list: (key, value).
        **kwargs: Arbitrary keyword arguments representing the config.
    """
    for key, value in list(args) + list(kwargs.items()):
        self[key] = value
If the key exists the entry is updated else it is added.
Args:
*args: variable length argument list. A valid argument is a two item
tuple/list. The first item is the key and the second is the value.
**kwargs: Arbitrary keyword arguments representing the config. | juraj-google-style |
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
... | Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_spe... | github-repos |
def AddTrip(self, schedule=None, headsign=None, service_period=None, trip_id=None):
if (schedule is None):
assert (self._schedule is not None)
schedule = self._schedule
if (trip_id is None):
trip_id = util.FindUniqueId(schedule.trips)
if (service_period is None):
service_peri... | Add a trip to this route.
Args:
schedule: a Schedule object which will hold the new trip or None to use
the schedule of this route.
headsign: headsign of the trip as a string
service_period: a ServicePeriod object or None to use
schedule.GetDefaultServicePeriod()
trip_id: optional trip_id for the new trip
Returns:
a ... | codesearchnet |
def get_associated_resource(self, task):
if not task:
raise HPOneViewUnknownType(MSG_INVALID_TASK)
if task['category'] != 'tasks' and task['category'] != 'backups':
raise HPOneViewUnknownType(MSG_UNKNOWN_OBJECT_TYPE)
if task['type'] == 'TaskResour... | Retrieve a resource associated with a task.
Args:
task: task dict
Returns:
tuple: task (updated), the entity found (dict) | juraj-google-style |
def compute_order(bytecode: list[opcodes.Opcode], python_version) -> list[Block]:
processed_blocks = set()
blocks = _split_bytecode(bytecode, processed_blocks, python_version)
if python_version >= (3, 12):
blocks = _remove_jump_back_block(blocks)
blocks = _remove_jmp_to_get_anext_and_merge(b... | Split bytecode into blocks and order the blocks.
This builds an "ancestor first" ordering of the basic blocks of the bytecode.
Args:
bytecode: A list of instances of opcodes.Opcode. (E.g. returned from
opcodes.dis())
Returns:
A list of Block instances. | github-repos |
def calculate_subscription_lifecycle(subscription_id):
subscription = Subscription.objects.select_related('messageset', 'schedule').get(id=subscription_id)
behind = subscription.messages_behind()
if (behind == 0):
return
current_messageset = subscription.messageset
current_sequence_number = ... | Calculates the expected lifecycle position the subscription in
subscription_ids, and creates a BehindSubscription entry for them.
Args:
subscription_id (str): ID of subscription to calculate lifecycle for | codesearchnet |
def insert(self, loc, column, value):
if is_list_like(value):
if isinstance(value, pandas.Series):
value = value.reindex(self.index)
value = list(value)
def insert(df, internal_indices=[]):
internal_idx = int(internal_indices[0])
old_index = df.index
df.index... | Insert new column data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new PandasQueryCompiler with new data inserted. | codesearchnet |
def save_screenshot(driver, name):
if hasattr(driver, 'save_screenshot'):
screenshot_dir = os.environ.get('SCREENSHOT_DIR')
if not screenshot_dir:
LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot')
return
elif not os.pa... | Save a screenshot of the browser.
The location of the screenshot can be configured
by the environment variable `SCREENSHOT_DIR`. If not set,
this defaults to the current working directory.
Args:
driver (selenium.webdriver): The Selenium-controlled browser.
name (str): A name for the screenshot, which will be used in... | juraj-google-style |
def release_client(self, client):
if isinstance(client, Client):
if (not self._is_expired_client(client)):
LOG.debug('Client is not expired. Adding back to pool')
self.__pool.append(client)
elif client.is_connected():
LOG.debug('Client is expired and connected. Di... | Releases a client object to the pool.
Args:
client: Client object. | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.