code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
async def delete(self, service_id: str) -> bool:
await self.docker._query(
"services/{service_id}".format(service_id=service_id), method="DELETE"
)
return True | Remove a service
Args:
service_id: ID or name of the service
Returns:
True if successful | juraj-google-style |
def connect_output(self, node):
if (len(self.outputs) == self.max_outputs):
raise TooManyOutputsError('Attempted to connect too many nodes to the output of a node', max_outputs=self.max_outputs, stream=self.stream)
self.outputs.append(node) | Connect another node to our output.
This downstream node will automatically be triggered when we update
our output.
Args:
node (SGNode): The node that should receive our output | codesearchnet |
def validate(self):
if (not isinstance(self.enum, enumeration.EnumMeta)):
raise TypeError('enumeration type {0} must be of type EnumMeta'.format(self.enum))
if (self.value is not None):
if (not isinstance(self.value, self.enum)):
raise TypeError('enumeration {0} must be of type {1}'.format(self.value, self.enum))
if (type(self.value.value) not in six.integer_types):
raise TypeError('enumeration value must be an int')
elif (self.value.value > Enumeration.MAX):
raise ValueError('enumeration value greater than accepted max')
elif (self.value.value < Enumeration.MIN):
raise ValueError('enumeration value less than accepted min') | Verify that the value of the Enumeration is valid.
Raises:
TypeError: if the enum is not of type Enum
ValueError: if the value is not of the expected Enum subtype or if
the value cannot be represented by an unsigned 32-bit integer | codesearchnet |
def delete(self, key):
self._cur_batch.delete(key)
self._num_mutations += 1
if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
self.commit()
self.begin() | Adds deletion of the entity with given key to the mutation buffer.
If mutation buffer reaches its capacity then this method commit all pending
mutations from the buffer and emties it.
Args:
key: key of the entity which should be deleted | juraj-google-style |
def get_step_by_name(self, name):
self._validate_step_name(name)
name = str(name)
try:
return self.all_upstream_steps[name]
except KeyError as e:
msg = 'No Step with name "{}" found. You have following Steps: {}'.format(name, list(self.all_upstream_steps.keys()))
raise StepError(msg) from e | Extracts step by name from the pipeline.
Extracted Step is a fully functional pipeline as well.
All upstream Steps are already defined.
Args:
name (str): name of the step to be fetched
Returns:
Step (obj): extracted step | codesearchnet |
def fromRaw(cls, skype=None, raw={}):
return cls(skype, raw, **cls.rawToFields(raw)) | Create a new instance based on the raw properties of an API response.
This can be overridden to automatically create subclass instances based on the raw content.
Args:
skype (Skype): parent Skype instance
raw (dict): raw object, as provided by the API
Returns:
SkypeObj: the new class instance | codesearchnet |
def if_callable_call_with_formatted_string(callback, formattable_string, *args):
try:
formatted_string = formattable_string.format(*args)
except IndexError:
raise ValueError('Mismatch metween amount of insertion points in the formattable string\nand the amount of args given.')
if callable(callback):
callback(formatted_string) | If the callback is callable, format the string with the args and make a call.
Otherwise, do nothing.
Args:
callback (function): May or may not be callable.
formattable_string (str): A string with '{}'s inserted.
*args: A variable amount of arguments for the string formatting. Must correspond to the
amount of '{}'s in 'formattable_string'.
Raises:
ValueError | codesearchnet |
def np2str(value):
if (hasattr(value, 'dtype') and issubclass(value.dtype.type, (np.string_, np.object_)) and (value.size == 1)):
value = np.asscalar(value)
if (not isinstance(value, str)):
value = value.decode()
return value
else:
raise ValueError('Array is not a string type or is larger than 1') | Convert an `numpy.string_` to str.
Args:
value (ndarray): scalar or 1-element numpy array to convert
Raises:
ValueError: if value is array larger than 1-element or it is not of
type `numpy.string_` or it is not a numpy array | codesearchnet |
def shape_internal(input, name=None, optimize=True, out_type=None):
with ops.name_scope(name, 'Shape', [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
if not out_type:
out_type = dtypes.int32
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if not context.executing_eagerly():
input = ops.convert_to_tensor(input)
input_shape = input.get_shape()
if optimize and input_shape.is_fully_defined():
if not out_type:
return constant_op._tensor_shape_tensor_conversion_function(input_shape)
return constant(input_shape.as_list(), out_type, name=name)
if not out_type:
out_type = dtypes.int32
return gen_array_ops.shape(input, name=name, out_type=out_type) | Returns the shape of a tensor.
If `out_type` is not specified and the shape is fully known, then we look at
the dimension values to determine whether to return an int32 or int64 tensor.
If the shape is not fully known, we default to int32.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation (`int32` or
`int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`. | github-repos |
def update(self, scope, at=0):
if hasattr(scope, '_mixins') and not at:
self._mixins.update(scope._mixins)
self[at]['__variables__'].update(scope[at]['__variables__'])
self[at]['__blocks__'].extend(scope[at]['__blocks__'])
self[at]['__names__'].extend(scope[at]['__names__']) | Update scope. Add another scope to this one.
Args:
scope (Scope): Scope object
Kwargs:
at (int): Level to update | juraj-google-style |
def click_nowait(self, pattern, action='click', desc=None, **match_kwargs):
point = self.match(pattern, **match_kwargs)
if not point or not point.matched:
return None
func = getattr(self, action)
func(*point.pos)
return point | Return immediately if no image found
Args:
- pattern (str or Pattern): filename or an opencv image object.
- action (str): click or long_click
Returns:
Click point or None | juraj-google-style |
def guess_peb_size(path):
file_offset = 0
offsets = []
f = open(path, 'rb')
f.seek(0, 2)
file_size = (f.tell() + 1)
f.seek(0)
for _ in range(0, file_size, FILE_CHUNK_SZ):
buf = f.read(FILE_CHUNK_SZ)
for m in re.finditer(UBI_EC_HDR_MAGIC, buf):
start = m.start()
if (not file_offset):
file_offset = start
idx = start
else:
idx = (start + file_offset)
offsets.append(idx)
file_offset += FILE_CHUNK_SZ
f.close()
occurances = {}
for i in range(0, len(offsets)):
try:
diff = (offsets[i] - offsets[(i - 1)])
except:
diff = offsets[i]
if (diff not in occurances):
occurances[diff] = 0
occurances[diff] += 1
most_frequent = 0
block_size = None
for offset in occurances:
if (occurances[offset] > most_frequent):
most_frequent = occurances[offset]
block_size = offset
return block_size | Determine the most likely block size
Arguments:
Str:path -- Path to file.
Returns:
Int -- PEB size.
Searches file for Magic Number, picks most
common length between them. | codesearchnet |
def set_raw_datadir(self, directory=None):
if directory is None:
self.logger.info("no directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("directory does not exist")
return
self.raw_datadir = directory | Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory) | juraj-google-style |
def AddSymbolicLink(self, path, linked_path):
if self.file_system.FileEntryExistsByPath(path):
raise ValueError('Path: {0:s} already set.'.format(path))
self._AddParentDirectories(path)
self.file_system.AddFileEntry(
path, file_entry_type=definitions.FILE_ENTRY_TYPE_LINK,
link_data=linked_path) | Adds a symbolic link to the fake file system.
Args:
path (str): path of the symbolic link within the fake file system.
linked_path (str): path that is linked.
Raises:
ValueError: if the path is already set. | juraj-google-style |
def equals(self, actual_seq):
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return (expected == actual) | Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool | codesearchnet |
def add_mount_point(self, path, total_size=None):
path = self.absnormpath(path)
if path in self.mount_points:
self.raise_os_error(errno.EEXIST, path)
self._last_dev += 1
self.mount_points[path] = {
'idev': self._last_dev, 'total_size': total_size, 'used_size': 0
}
root_dir = (self.root if path == self.root.name
else self.create_dir(path))
root_dir.st_dev = self._last_dev
return self.mount_points[path] | Add a new mount point for a filesystem device.
The mount point gets a new unique device number.
Args:
path: The root path for the new mount path.
total_size: The new total size of the added filesystem device
in bytes. Defaults to infinite size.
Returns:
The newly created mount point dict.
Raises:
OSError: if trying to mount an existing mount point again. | juraj-google-style |
def _process_path_prefix(path_prefix):
_validate_path(path_prefix)
if (not _GCS_PATH_PREFIX_REGEX.match(path_prefix)):
raise ValueError(('Path prefix should have format /bucket, /bucket/, or /bucket/prefix but got %s.' % path_prefix))
bucket_name_end = path_prefix.find('/', 1)
bucket = path_prefix
prefix = None
if (bucket_name_end != (- 1)):
bucket = path_prefix[:bucket_name_end]
prefix = (path_prefix[(bucket_name_end + 1):] or None)
return (bucket, prefix) | Validate and process a Google Cloud Stoarge path prefix.
Args:
path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix'
or '/bucket/' or '/bucket'.
Raises:
ValueError: if path is invalid.
Returns:
a tuple of /bucket and prefix. prefix can be None. | codesearchnet |
def automatic_gamma_density(structure, kppa):
latt = structure.lattice
lengths = latt.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(round(mult / l)) for l in lengths]
num_div = [i if i > 0 else 1 for i in num_div]
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = "pymatgen 4.7.6+ generated KPOINTS with grid density = " + \
"{} / atom".format(kppa)
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0]) | Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density | juraj-google-style |
def parse_bucket_info(domain):
match = RGX_BUCKET.match(domain)
if match:
data = match.groupdict()
return data['bucket'], data['region'] or 'us-east-1' | Parse a domain name to gather the bucket name and region for an S3 bucket. Returns a tuple
(bucket_name, bucket_region) if a valid domain name, else `None`
>>> parse_bucket_info('www.riotgames.com.br.s3-website-us-west-2.amazonaws.com')
('www.riotgames.com.br', 'us-west-2')
Args:
domain (`str`): Domain name to parse
Returns:
:obj:`list` of `str`: `str`,`None` | juraj-google-style |
def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False):
if slow_tokenizer_class is None and fast_tokenizer_class is None:
raise ValueError('You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class')
if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):
raise ValueError('You passed a fast tokenizer in the `slow_tokenizer_class`.')
if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):
raise ValueError('You passed a slow tokenizer in the `fast_tokenizer_class`.')
if slow_tokenizer_class is not None and fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) and (fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class):
raise ValueError(f'The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not consistent with the slow tokenizer class you passed (fast tokenizer has {fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those so they match!')
if config_class in TOKENIZER_MAPPING._extra_content:
existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]
if slow_tokenizer_class is None:
slow_tokenizer_class = existing_slow
if fast_tokenizer_class is None:
fast_tokenizer_class = existing_fast
TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok) | Register a new tokenizer in this mapping.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
slow_tokenizer_class ([`PretrainedTokenizer`], *optional*):
The slow tokenizer to register.
fast_tokenizer_class ([`PretrainedTokenizerFast`], *optional*):
The fast tokenizer to register. | github-repos |
def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[InstructBlipProcessorKwargs]) -> BatchFeature:
if images is None and text is None:
raise ValueError('You have to specify at least images or text.')
output_kwargs = self._merge_kwargs(InstructBlipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
encoding = BatchFeature()
if text is not None:
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and (not isinstance(text[0], str)):
raise ValueError('Invalid input text. Please provide a string, or a list of strings')
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
_text_encoding = self.tokenizer(text, **output_kwargs['text_kwargs'], return_tensors=None)
output_kwargs['text_kwargs']['return_tensors'] = return_tensors
if self.num_query_tokens is not None and images is not None:
text_encoding = {}
image_tokens = self.image_token.content * self.num_query_tokens
image_token_encoding = self.tokenizer([image_tokens] * len(text), add_special_tokens=False, return_tensors=None)
for k in _text_encoding:
text_encoding[k] = [img_encoding + txt_encoding for img_encoding, txt_encoding in zip(image_token_encoding[k], _text_encoding[k])]
else:
text_encoding = _text_encoding
if images is not None:
logger.warning_once('Expanding inputs for image tokens in InstructBLIP should be done in processing. Please follow instruction here (https:
text_encoding = BatchEncoding(text_encoding, tensor_type=return_tensors)
encoding.update(text_encoding)
qformer_text_encoding = self.qformer_tokenizer(text, **output_kwargs['text_kwargs'])
encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids')
encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask')
if images is not None:
image_encoding = self.image_processor(images, **output_kwargs['images_kwargs'])
encoding.update(image_encoding)
return encoding | This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences). | github-repos |
def StopTaskStorage(self, abort=False):
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError('Unsupported storage type.')
if os.path.isdir(self._merge_task_storage_path):
if abort:
shutil.rmtree(self._merge_task_storage_path)
else:
os.rmdir(self._merge_task_storage_path)
if os.path.isdir(self._processed_task_storage_path):
if abort:
shutil.rmtree(self._processed_task_storage_path)
else:
os.rmdir(self._processed_task_storage_path)
if os.path.isdir(self._task_storage_path):
if abort:
shutil.rmtree(self._task_storage_path)
else:
os.rmdir(self._task_storage_path)
self._merge_task_storage_path = None
self._processed_task_storage_path = None
self._task_storage_path = None | Removes the temporary path for the task storage.
The results of tasks will be lost on abort.
Args:
abort (bool): True to indicate the stop is issued on abort.
Raises:
IOError: if the storage type is not supported.
OSError: if the storage type is not supported. | juraj-google-style |
def extract_version(exepath, version_arg, word_index=(- 1), version_rank=3):
if isinstance(version_arg, basestring):
version_arg = [version_arg]
args = ([exepath] + version_arg)
(stdout, stderr, returncode) = _run_command(args)
if returncode:
raise RezBindError(('failed to execute %s: %s\n(error code %d)' % (exepath, stderr, returncode)))
stdout = stdout.strip().split('\n')[0].strip()
log(("extracting version from output: '%s'" % stdout))
try:
strver = stdout.split()[word_index]
toks = strver.replace('.', ' ').replace('-', ' ').split()
strver = '.'.join(toks[:version_rank])
version = Version(strver)
except Exception as e:
raise RezBindError(("failed to parse version from output '%s': %s" % (stdout, str(e))))
log(("extracted version: '%s'" % str(version)))
return version | Run an executable and get the program version.
Args:
exepath: Filepath to executable.
version_arg: Arg to pass to program, eg "-V". Can also be a list.
word_index: Expect the Nth word of output to be the version.
version_rank: Cap the version to this many tokens.
Returns:
`Version` object. | codesearchnet |
def change_wavelength(self, wavelength):
for name, slab in self.slabs.items():
const_args = slab._const_args
mat_args = slab._mat_params
const_args[8] = wavelength
s = Slab(*const_args)
for mat_arg in mat_args:
s.add_material(*mat_arg)
self.slabs[name] = s
self._wl = wavelength | Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength. | juraj-google-style |
def remove_file_from_tree(tree, file_path):
match = None
for item in tree:
if (item.get('path') == file_path):
match = item
break
if match:
tree.remove(match)
return tree | Remove a file from a tree.
Args:
tree
A list of dicts containing info about each blob in a tree.
file_path
The path of a file to remove from a tree.
Returns:
The provided tree, but with the item matching the specified
file_path removed. | codesearchnet |
def get_folder_details(self, folder):
if (not is_valid_uuid(folder)):
raise StorageArgumentException('Invalid UUID for folder: {0}'.format(folder))
return self._authenticated_request.to_endpoint('folder/{}/'.format(folder)).return_body().get() | Get information on a given folder.
Args:
folder (str): The UUID of the requested folder.
Returns:
A dictionary of the folder details if found::
{
u'created_by': u'303447',
u'created_on': u'2017-03-21T14:06:32.293902Z',
u'description': u'',
u'entity_type': u'folder',
u'modified_by': u'303447',
u'modified_on': u'2017-03-21T14:06:32.293967Z',
u'name': u'myfolder',
u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682',
u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes | codesearchnet |
def intent(method):
def wrapper(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except exceptions.MatrixError as e:
if isinstance(e.original_exception,
matrix_client.errors.MatrixRequestError):
self._handle_request_exception(e)
return method(self, *args, **kwargs)
else:
raise e
return wrapper | Helps object methods handle MatrixRequestError.
Args:
method(function): Object method to be wrapped
Method's object must have _handle_request_exception method that deals with
specific status codes and errcodes. | juraj-google-style |
def cluster_from_file(filename):
atoms_string = Atoms.atoms_string_from_file(filename)
line_list = [l.split() for l in atoms_string.splitlines()[3:]]
coords = []
symbols = []
for l in line_list:
if l:
coords.append([float(i) for i in l[:3]])
symbols.append(l[4])
return Molecule(symbols, coords) | Parse the feff input file and return the atomic cluster as a Molecule
object.
Args:
filename (str): path the feff input file
Returns:
Molecule: the atomic cluster as Molecule object. The absorbing atom
is the one at the origin. | codesearchnet |
def Decrypt(self, encrypted_data):
index_split = -(len(encrypted_data) % DES3.block_size)
if index_split:
remaining_encrypted_data = encrypted_data[index_split:]
encrypted_data = encrypted_data[:index_split]
else:
remaining_encrypted_data = b''
decrypted_data = self._des3_cipher.decrypt(encrypted_data)
return decrypted_data, remaining_encrypted_data | Decrypts the encrypted data.
Args:
encrypted_data (bytes): encrypted data.
Returns:
tuple[bytes, bytes]: decrypted data and remaining encrypted data. | juraj-google-style |
def append_memory_pdf_to_writer(input_pdf: bytes,
writer: PdfFileWriter,
start_recto: bool = True) -> None:
if not input_pdf:
return
if start_recto and writer.getNumPages() % 2 != 0:
writer.addBlankPage()
infile = io.BytesIO(input_pdf)
reader = PdfFileReader(infile)
for page_num in range(reader.numPages):
writer.addPage(reader.getPage(page_num)) | Appends a PDF (as bytes in memory) to a PyPDF2 writer.
Args:
input_pdf: the PDF, as ``bytes``
writer: the writer
start_recto: start a new right-hand page? | juraj-google-style |
def _add_ragged_partition(values, partition, tensor_dict, row_splits_dtype, validate):
if isinstance(partition, RaggedFeature.UniformRowLength):
if isinstance(values, ragged_tensor.RaggedTensor):
length = ops.convert_to_tensor(partition.length, dtype=row_splits_dtype)
return ragged_tensor.RaggedTensor.from_uniform_row_length(values, length, validate=validate)
else:
return array_ops.reshape(values, array_ops.concat([[-1, partition.length], array_ops.shape(values)[1:]], axis=0))
else:
partition_t = math_ops.cast(tensor_dict[partition.key], row_splits_dtype)
if isinstance(partition, RaggedFeature.RowSplits):
return ragged_tensor.RaggedTensor.from_row_splits(values, partition_t, validate=validate)
elif isinstance(partition, RaggedFeature.RowLengths):
return ragged_tensor.RaggedTensor.from_row_lengths(values, partition_t, validate=validate)
elif isinstance(partition, RaggedFeature.RowStarts):
return ragged_tensor.RaggedTensor.from_row_starts(values, partition_t, validate=validate)
elif isinstance(partition, RaggedFeature.RowLimits):
return ragged_tensor.RaggedTensor.from_row_limits(values, partition_t, validate=validate)
elif isinstance(partition, RaggedFeature.ValueRowIds):
return ragged_tensor.RaggedTensor.from_value_rowids(values, partition_t, validate=validate)
raise ValueError(f'Unhandled partition type {partition!r}') | Creates a RaggedTensor from a values tensor and a partition tensor.
Args:
values: The values tensor for the new RaggedTensor.
partition: The partition configuration object. Specifies the key that
should be used to look up the partition tensor (unless partition is a
RaggedFeature.UniformRowLength, in which case there is no partition
tensor).
tensor_dict: The dictionary mapping keys to tensors.
row_splits_dtype: The dtype for the partition tensor.
validate: Whether to validate that the values form a valid RaggedTensor.
Returns:
A new RaggedTensor formed from the values and partition tensors. | github-repos |
def constraint(self):
raise NotImplementedError | Returns the constraint function associated with this variable.
Returns:
The constraint function that was passed to the variable constructor.
Can be `None` if no constraint was passed. | github-repos |
def parse_docs(docs, marks):
if (docs is None):
return {}
indexs = []
for mark in marks:
i = docs.find(mark)
if (i >= 0):
indexs.append(i)
if (not indexs):
return {'$desc': textwrap.dedent(docs).strip()}
start = min(indexs)
start = docs.rfind('\n', 0, start)
yamltext = textwrap.dedent(docs[(start + 1):])
meta = yaml.load(yamltext)
meta['$desc'] = textwrap.dedent(docs[:start]).strip()
return meta | Parse YAML syntax content from docs
If docs is None, return {}
If docs has no YAML content, return {"$desc": docs}
Else, parse YAML content, return {"$desc": docs, YAML}
Args:
docs (str): docs to be parsed
marks (list): list of which indicate YAML content starts
Returns:
A dict contains information of docs | codesearchnet |
def state(self):
try:
return libvirt_utils.Domain.resolve_state(self.raw_state())
except vm_plugin.LagoVMDoesNotExistError:
return 'down'
except vm_plugin.LagoFailedToGetVMStateError:
return 'failed to get state'
except KeyError:
return 'unknown state' | Return a small description of the current status of the domain
Returns:
str: small description of the domain status, 'down' if it's not
found at all. | codesearchnet |
def histogram(self, tag, values, bins, step=None):
if step is None:
step = self._step
else:
self._step = step
values = onp.array(values)
bins = onp.array(bins)
values = onp.reshape(values, -1)
counts, limits = onp.histogram(values, bins=bins)
cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
start, end = onp.searchsorted(
cum_counts, [0, cum_counts[-1] - 1], side='right')
start, end = int(start), int(end) + 1
counts = (
counts[start -
1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))
limits = limits[start:end + 1]
sum_sq = values.dot(values)
histo = HistogramProto(
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limit=limits.tolist(),
bucket=counts.tolist())
summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
self.add_summary(summary, step) | Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step | juraj-google-style |
def color_palette_dict(self, alpha=0.35):
color_dict = {}
for hkl in self.all_slab_entries.keys():
rgb_indices = [0, 1, 2]
color = [0, 0, 0, 1]
random.shuffle(rgb_indices)
for i, ind in enumerate(rgb_indices):
if i == 2:
break
color[ind] = np.random.uniform(0, 1)
clean_list = np.linspace(0, 1, len(self.all_slab_entries[hkl]))
for i, clean in enumerate(self.all_slab_entries[hkl].keys()):
c = copy.copy(color)
c[rgb_indices[2]] = clean_list[i]
color_dict[clean] = c
for ads_entry in self.all_slab_entries[hkl][clean]:
c_ads = copy.copy(c)
c_ads[3] = alpha
color_dict[ads_entry] = c_ads
return color_dict | Helper function to assign each facet a unique color using a dictionary.
Args:
alpha (float): Degree of transparency
return (dict): Dictionary of colors (r,g,b,a) when plotting surface
energy stability. The keys are individual surface entries where
clean surfaces have a solid color while the corresponding adsorbed
surface will be transparent. | juraj-google-style |
def load_spacy_rule(file_path: str) -> Dict:
with open(file_path) as fp:
return json.load(fp) | A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules | juraj-google-style |
def _validate_tensor_info(self, tensor_info):
if tensor_info is None:
raise AssertionError('All TensorInfo protos used in the SignatureDefs must have the name and dtype fields set.')
if tensor_info.WhichOneof('encoding') is None:
raise AssertionError(f"Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in the SignatureDefs must have one of the 'encoding' fields (e.g., name or coo_sparse) set.")
if tensor_info.WhichOneof('encoding') == 'composite_tensor':
for component in tensor_info.composite_tensor.components:
self._validate_tensor_info(component)
elif tensor_info.dtype == types_pb2.DT_INVALID:
raise AssertionError(f'Invalid `tensor_info`: {tensor_info}. All TensorInfo protos used in the SignatureDefs must have the dtype field set.') | Validates the `TensorInfo` proto.
Checks if the `encoding` (`name` or `coo_sparse` or `type_spec`) and
`dtype` fields exist and are non-empty.
Args:
tensor_info: `TensorInfo` protocol buffer to validate.
Raises:
AssertionError: If the `encoding` or `dtype` fields of the supplied
`TensorInfo` proto are not populated. | github-repos |
def list(self):
import IPython
data = [{'name': version['name'].split()[(- 1)], 'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']} for version in self.get_iterator()]
IPython.display.display(datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime'])) | List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment. | codesearchnet |
def Audio(self, run, tag):
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag) | Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`. | codesearchnet |
def __init__(self, name: Union[str, bytes], bound_context: context.Context, function_type: function_type_lib.FunctionType, children: Optional[List['AtomicFunction']]=None, call_options: CallOptions=CallOptions(), cached_graph: Optional[func_graph_module.FuncGraph]=None):
self._name = compat.as_bytes(name)
self._bound_context = bound_context
self._function_type = function_type
self._children = children if children else []
self._call_options = call_options
self._cached_definition = None
self._cached_graph = cached_graph
self._generated_graph = None
ref_key = (self._bound_context.function_scope_id, self.name)
if ref_key not in RUNTIME_FUNCTION_REFS:
RUNTIME_FUNCTION_REFS[ref_key] = 1
else:
RUNTIME_FUNCTION_REFS[ref_key] += 1 | Construct a new AtomicFunction.
Args:
name: str/bytes name of the runtime function in the bound context.
bound_context: interface to the runtime for the AtomicFunction.
function_type: input/output contract for the AtomicFunction
children: list of AtomicFunctions that are needed to call this one.
call_options: extra configuration options for the call.
cached_graph: FuncGraph that this AtomicFunction was generated from (if
known). Otherwise it will lazily construct a new corresponding FuncGraph
if ever needed. | github-repos |
def persist_as_png(structure_dict, filepath):
graph = _create_graph(structure_dict)
graph.write(filepath, format='png') | Saves pipeline diagram to disk as png file.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`
filepath (str): filepath to which the png with pipeline visualization should be persisted | codesearchnet |
def close_position(self, repay_only):
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close', data=json.dumps(params)) | Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented | codesearchnet |
def load_b26_file(file_name):
assert os.path.exists(file_name)
with open(file_name, 'r') as infile:
data = yaml.safe_load(infile)
return data | loads a .b26 file into a dictionary
Args:
file_name:
Returns: dictionary with keys instrument, scripts, probes | codesearchnet |
def assign(self, value, use_locking=False, name=None, read_value=True):
raise NotImplementedError | Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
The updated variable. If `read_value` is false, instead returns None in
Eager mode and the assign op in graph mode. | github-repos |
def reset_dtensor_device(is_async: bool) -> None:
global _dtensor_singleton
device = dtensor_device.DTensorDevice(meshes=[], is_async=is_async)
_dtensor_singleton = device | Resets the Eager execution device for DTensor.
This function is only intended for testing and diagnostics.
Args:
is_async: If True, the device uses async execution. | github-repos |
def ping(self, timeout=12):
self.conn('POST', '{0}/users/ME/endpoints/{1}/active'.format(self.conn.msgsHost, self.id), auth=SkypeConnection.Auth.RegToken, json={'timeout': timeout}) | Send a keep-alive request for the endpoint.
Args:
timeout (int): maximum amount of time for the endpoint to stay active | codesearchnet |
def list_container_services_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.ContainerService/ContainerServices',
'?api-version=', ACS_API])
return do_get(endpoint, access_token) | List the container services in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON model. | juraj-google-style |
def save_scatter_table(self, fn, description=''):
data = {'description': description, 'time': datetime.now(), 'psd_scatter': (self.num_points, self.D_max, self._psd_D, self._S_table, self._Z_table, self._angular_table, self._m_table, self.geometries), 'version': tmatrix_aux.VERSION}
pickle.dump(data, file(fn, 'w'), pickle.HIGHEST_PROTOCOL) | Save the scattering lookup tables.
Save the state of the scattering lookup tables to a file.
This can be loaded later with load_scatter_table.
Other variables will not be saved, but this does not matter because
the results of the computations are based only on the contents
of the table.
Args:
fn: The name of the scattering table file.
description (optional): A description of the table. | codesearchnet |
def filter_by_doys(self, doys):
_filt_values = []
_filt_datetimes = []
for i, d in enumerate(self.datetimes):
if d in doys:
_filt_datetimes.append(d)
_filt_values.append(self._values[i])
_filt_header = self.header.duplicate()
return DailyCollection(_filt_header, _filt_values, _filt_datetimes) | Filter the Data Collection based on a list of days of the year (as integers).
Args:
doys: A List of days of the year [1..365]
Return:
A new Data Collection with filtered data | juraj-google-style |
def set_sleep_timer(self, sleep_time_seconds):
try:
if (sleep_time_seconds is None):
sleep_time = ''
else:
sleep_time = format(datetime.timedelta(seconds=int(sleep_time_seconds)))
self.avTransport.ConfigureSleepTimer([('InstanceID', 0), ('NewSleepTimerDuration', sleep_time)])
except SoCoUPnPException as err:
if ('Error 402 received' in str(err)):
raise ValueError('invalid sleep_time_seconds, must be integer value between 0 and 86399 inclusive or None')
raise
except ValueError:
raise ValueError('invalid sleep_time_seconds, must be integer value between 0 and 86399 inclusive or None') | Sets the sleep timer.
Args:
sleep_time_seconds (int or NoneType): How long to wait before
turning off speaker in seconds, None to cancel a sleep timer.
Maximum value of 86399
Raises:
SoCoException: Upon errors interacting with Sonos controller
ValueError: Argument/Syntax errors | codesearchnet |
def is_remote_added(remote):
out = __salt__['cmd.run_all']((FLATPAK_BINARY_NAME + ' remotes'))
lines = out.splitlines()
for item in lines:
i = re.split('\\t+', item.rstrip('\t'))
if (i[0] == remote):
return True
return False | Determines if a remote exists.
Args:
remote (str): The remote's name.
Returns:
bool: True if the remote has already been added.
CLI Example:
.. code-block:: bash
salt '*' flatpak.is_remote_added flathub | codesearchnet |
def get_output_mask_at(self, node_index):
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None) | Retrieves the output mask tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs). | github-repos |
def register_access_db(fullfilename: str, dsn: str, description: str) -> bool:
directory = os.path.dirname(fullfilename)
return create_sys_dsn(
access_driver,
SERVER="",
DESCRIPTION=description,
DSN=dsn,
DBQ=fullfilename,
DefaultDir=directory
) | (Windows only.)
Registers a Microsoft Access database with ODBC.
Args:
fullfilename: filename of the existing database
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created? | juraj-google-style |
def get_data_files_path():
return _os.path.dirname(_inspect.getfile(_sys._getframe(1))) | Get a direct path to the data files colocated with the script.
Returns:
The directory where files specified in data attribute of py_test
and py_binary are stored. | github-repos |
def FixValue(value):
if value.startswith('"') and value.endswith('"') or (value.startswith("'") and value.endswith("'")):
value = value[1:-1]
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
return value
return value | Helper function to fix values loaded from a config file.
Currently we strip bracketed quotes as well as convert numbers to
floats for configuration parameters expecting numerical data types.
Args:
value: value to be converted
Returns:
fixed value | github-repos |
def run_step(self, context):
logger.debug('starting')
self.set_step_input_context(context)
if self.while_decorator:
self.while_decorator.while_loop(context, self.run_foreach_or_conditional)
else:
self.run_foreach_or_conditional(context)
logger.debug('done') | Run a single pipeline step.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | codesearchnet |
def verify_certs_chain(certs_chain: List[crypto.X509], amazon_cert: crypto.X509) -> bool:
store = crypto.X509Store()
for cert in certs_chain:
store.add_cert(cert)
default_verify_paths = ssl.get_default_verify_paths()
default_verify_file = default_verify_paths.cafile
default_verify_file = Path(default_verify_file).resolve() if default_verify_file else None
default_verify_path = default_verify_paths.capath
default_verify_path = Path(default_verify_path).resolve() if default_verify_path else None
ca_files = [ca_file for ca_file in default_verify_path.iterdir()] if default_verify_path else []
if default_verify_file:
ca_files.append(default_verify_file)
for ca_file in ca_files:
ca_file: Path
if ca_file.is_file():
with ca_file.open('r', encoding='ascii') as crt_f:
ca_certs_txt = crt_f.read()
ca_certs = extract_certs(ca_certs_txt)
for cert in ca_certs:
store.add_cert(cert)
ssl_context = ssl.create_default_context()
der_certs = ssl_context.get_ca_certs(binary_form=True)
pem_certs = '\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in der_certs])
ca_certs = extract_certs(pem_certs)
for ca_cert in ca_certs:
store.add_cert(ca_cert)
store_context = crypto.X509StoreContext(store, amazon_cert)
try:
store_context.verify_certificate()
result = True
except crypto.X509StoreContextError:
result = False
return result | Verifies if Amazon and additional certificates creates chain of trust to a root CA.
Args:
certs_chain: List of pycrypto X509 intermediate certificates from signature chain URL.
amazon_cert: Pycrypto X509 Amazon certificate.
Returns:
result: True if verification was successful, False if not. | juraj-google-style |
def solution(swarm):
best = swarm[0]
cmp = comparator(best.best_fitness)
for particle in swarm:
if cmp(particle.best_fitness, best.best_fitness):
best = particle
return best | Determines the global best particle in the swarm.
Args:
swarm: iterable: an iterable that yields all particles in the swarm.
Returns:
cipy.algorithms.pso.Particle: The best particle in the swarm when
comparing the best_fitness values of the particles. | juraj-google-style |
def is_native_xmon_op(op: ops.Operation) -> bool:
return (isinstance(op, ops.GateOperation) and is_native_xmon_gate(op.gate)) | Check if the gate corresponding to an operation is a native xmon gate.
Args:
op: Input operation.
Returns:
True if the operation is native to the xmon, false otherwise. | codesearchnet |
def get_img_shape(img):
if isinstance(img, np.ndarray):
shape = img.shape
else:
shape = K.int_shape(img)
if K.image_data_format() == 'channels_last':
shape = list(shape)
shape.insert(1, shape[-1])
shape = tuple(shape[:-1])
return shape | Returns image shape in a backend agnostic manner.
Args:
img: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or
`(image_dims..., channels)` if data_format='channels_last'.
Returns:
Tuple containing image shape information in `(samples, channels, image_dims...)` order. | juraj-google-style |
def verify_sans(amazon_cert: crypto.X509) -> bool:
cert_extentions = [amazon_cert.get_extension(i) for i in range(amazon_cert.get_extension_count())]
subject_alt_names = ''
for extention in cert_extentions:
if 'subjectAltName' in str(extention.get_short_name()):
subject_alt_names = extention.__str__()
break
result = 'echo-api.amazon.com' in subject_alt_names
return result | Verifies Subject Alternative Names (SANs) for Amazon certificate.
Args:
amazon_cert: Pycrypto X509 Amazon certificate.
Returns:
result: True if verification was successful, False if not. | juraj-google-style |
def swo_disable(self, port_mask):
res = self._dll.JLINKARM_SWO_DisableTarget(port_mask)
if (res != 0):
raise errors.JLinkException(res)
return None | Disables ITM & Stimulus ports.
Args:
self (JLink): the ``JLink`` instance
port_mask (int): mask specifying which ports to disable
Returns:
``None``
Raises:
JLinkException: on error | codesearchnet |
def _validate_testbed_name(name):
if not name:
raise MoblyConfigError("Test bed names can't be empty.")
name = str(name)
for char in name:
if char not in utils.valid_filename_chars:
raise MoblyConfigError(
'Char "%s" is not allowed in test bed names.' % char) | Validates the name of a test bed.
Since test bed names are used as part of the test run id, it needs to meet
certain requirements.
Args:
name: The test bed's name specified in config file.
Raises:
MoblyConfigError: The name does not meet any criteria. | juraj-google-style |
def _replace_variable_with_pattern(match):
positional = match.group('positional')
name = match.group('name')
template = match.group('template')
if (name is not None):
if (not template):
return _SINGLE_SEGMENT_PATTERN.format(name)
elif (template == '**'):
return _MULTI_SEGMENT_PATTERN.format(name)
else:
return _generate_pattern_for_template(template)
elif (positional == '*'):
return _SINGLE_SEGMENT_PATTERN
elif (positional == '**'):
return _MULTI_SEGMENT_PATTERN
else:
raise ValueError('Unknown template expression {}'.format(match.group(0))) | Replace a variable match with a pattern that can be used to validate it.
Args:
match (re.Match): A regular expression match
Returns:
str: A regular expression pattern that can be used to validate the
variable in an expanded path.
Raises:
ValueError: If an unexpected template expression is encountered. | codesearchnet |
def _get_app_path(url):
app_path = urlparse(url).path.rstrip('/')
if (not app_path.startswith('/')):
app_path = ('/' + app_path)
return app_path | Extract the app path from a Bokeh server URL
Args:
url (str) :
Returns:
str | codesearchnet |
def upsert_variant(self, variant_obj):
LOG.debug("Upserting variant %s", variant_obj['_id'])
try:
result = self.variant_collection.insert_one(variant_obj)
except DuplicateKeyError as err:
LOG.debug("Variant %s already exists in database", variant_obj['_id'])
result = self.variant_collection.find_one_and_update(
{'_id': variant_obj['_id']},
{
'$set': {
'compounds': variant_obj.get('compounds',[])
}
}
)
variant = self.variant_collection.find_one({'_id': variant_obj['_id']})
return result | Load a variant object, if the object already exists update compounds.
Args:
variant_obj(dict)
Returns:
result | juraj-google-style |
def _conditional_patch(src: symbolic.Symbolic, condition: Callable[[utils.KeyPath, Any, symbolic.Symbolic], bool], value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:
if value_fn is not None and value is not None:
raise ValueError('Either `value` or `value_fn` should be specified.')
def _fn(k, v, p):
if condition(k, v, p):
return value_fn(v) if value_fn else value
return v
return src.rebind(_fn, raise_on_no_change=False, skip_notification=skip_notification) | Recursive patch values on condition.
Args:
src: symbolic value to patch.
condition: Callable object with signature (key_path, value, parent) which
returns whether a field should be patched.
value: New value for field that satisfy `condition`.
value_fn: Callable object that produces new value based on old value.
If not None, `value` must be None.
skip_notification: If True, `on_change` event will not be triggered for this
operation. If None, the behavior is decided by `pg.notify_on_rebind`.
Please see `symbolic.Symbolic.rebind` for details.
Returns:
`src` after being patched. | github-repos |
def find_ruuvitags(bt_device=''):
log.info('Finding RuuviTags. Stop with Ctrl+C.')
datas = dict()
for new_data in RuuviTagSensor._get_ruuvitag_datas(bt_device=bt_device):
if (new_data[0] in datas):
continue
datas[new_data[0]] = new_data[1]
log.info(new_data[0])
log.info(new_data[1])
return datas | Find all RuuviTags. Function will print the mac and the state of the sensors when found.
Function will execute as long as it is stopped. Stop ecexution with Crtl+C.
Returns:
dict: MAC and state of found sensors | codesearchnet |
def on_core_metadata_event(self, event):
core_metadata = json.loads(event.log_message.message)
input_names = ','.join(core_metadata['input_names'])
output_names = ','.join(core_metadata['output_names'])
target_nodes = ','.join(core_metadata['target_nodes'])
self._run_key = RunKey(input_names, output_names, target_nodes)
if (not self._graph_defs):
self._graph_defs_arrive_first = False
else:
for device_name in self._graph_defs:
self._add_graph_def(device_name, self._graph_defs[device_name])
self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
logger.info('on_core_metadata_event() waiting for client ack (meta)...')
self._incoming_channel.get()
logger.info('on_core_metadata_event() client ack received (meta).') | Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details. | codesearchnet |
def n_feature_hash(feature, dims, seeds):
vec = np.zeros(sum(dims))
offset = 0
for (seed, dim) in zip(seeds, dims):
vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
offset += dim
return vec | N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `s`. | codesearchnet |
def listen(self):
self.listening = True
if self.threading:
from threading import Thread
self.listen_thread = Thread(target=self.listen_loop)
self.listen_thread.daemon = True
self.listen_thread.start()
self.scheduler_thread = Thread(target=self.scheduler)
self.scheduler_thread.daemon = True
self.scheduler_thread.start()
else:
self.listen_loop() | Starts the listen loop. If threading is enabled, then the loop will
be started in its own thread.
Args:
None
Returns:
None | juraj-google-style |
def assemble_transition_model_from_gradable_adjectives(self):
df = pd.read_sql_table("gradableAdjectiveData", con=engine)
gb = df.groupby("adjective")
rs = gaussian_kde(
flatMap(
lambda g: gaussian_kde(get_respdevs(g[1]))
.resample(self.res)[0]
.tolist(),
gb,
)
).resample(self.res)[0]
for edge in self.edges(data=True):
edge[2]["ConditionalProbability"] = constructConditionalPDF(
gb, rs, edge
)
edge[2]["βs"] = np.tan(
edge[2]["ConditionalProbability"].resample(self.res)[0]
) | Add probability distribution functions constructed from gradable
adjective data to the edges of the analysis graph data structure.
Args:
adjective_data
res | juraj-google-style |
def scan_and_connect(self, devnames, timeout=DEF_TIMEOUT, calibration=True):
responses = self.scan_devices(devnames, timeout)
for dev in devnames:
if (dev not in responses):
logger.error('Failed to find device {} during scan'.format(dev))
return (False, [])
return self.connect([responses.get_device(dev) for dev in devnames], calibration) | Scan for and then connect to a set of one or more SK8s.
This method is intended to be a simple way to combine the steps of
running a BLE scan, checking the results and connecting to one or more
devices. When called, a scan is started for a period equal to `timeout`,
and a list of devices is collected. If at any point during the scan all of
the supplied devices are detected, the scan will be ended immediately.
After the scan has completed, the method will only proceed to creating
connections if the scan results contain all the specified devices.
Args:
devnames (list): a list of device names (1 or more)
timeout (float): a time period in seconds to run the scanning process
(will be terminated early if all devices in `devnames` are discovered)
Returns:
Returns the same results as :meth:`connect`. | codesearchnet |
def params(self, params):
url = furl(self._request.rawurl)
url = url.add(params)
self._request.url = url.url
self.add_matcher(matcher('QueryMatcher', params)) | Defines a set of URL query params to match.
Arguments:
params (dict): set of params to match.
Returns:
self: current Mock instance. | codesearchnet |
def occupations(self, site_label):
return sum(((atom.site.label == site_label) for atom in self.atoms)) | Number of these atoms occupying a specific site type.
Args:
site_label (Str): Label for the site type being considered.
Returns:
(Int): Number of atoms occupying sites of type `site_label`. | codesearchnet |
def __init__(self, memspace='private', memtype='mot_float_type'):
super().__init__(
memtype,
self.__class__.__name__ + '_' + memspace + '_' + memtype,
[],
resource_filename('mot', 'data/opencl/euclidian_norm.cl'),
var_replace_dict={'MEMSPACE': memspace, 'MEMTYPE': memtype}) | A CL functions for calculating the Euclidian distance between n values.
Args:
memspace (str): The memory space of the memtyped array (private, constant, global).
memtype (str): the memory type to use, double, float, mot_float_type, ... | juraj-google-style |
def goto_step(self, inst: InstanceNode) -> InstanceNode:
try:
return inst._entry(
inst.value.index(self.parse_value(inst.schema_node)))
except ValueError:
raise NonexistentInstance(inst.json_pointer(),
f"entry '{self.value!s}'") from None | Return member instance of `inst` addressed by the receiver.
Args:
inst: Current instance. | juraj-google-style |
def cc(project, detect_project=False):
from benchbuild.utils import cmd
cc_name = str(CFG['compiler']['c'])
wrap_cc(cc_name, compiler(cc_name), project, detect_project=detect_project)
return cmd['./{}'.format(cc_name)] | Return a clang that hides CFLAGS and LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
cflags: The CFLAGS we want to hide.
ldflags: The LDFLAGS we want to hide.
func (optional): A function that will be pickled alongside the compiler.
It will be called before the actual compilation took place. This
way you can intercept the compilation process with arbitrary python
code.
Returns (benchbuild.utils.cmd):
Path to the new clang command. | codesearchnet |
def _eig_complex_symmetric(M: np.ndarray) -> Tuple[(np.ndarray, np.ndarray)]:
if (not np.allclose(M, M.transpose())):
raise np.linalg.LinAlgError('Not a symmetric matrix')
max_attempts = 16
for _ in range(max_attempts):
c = np.random.uniform(0, 1)
matrix = ((c * M.real) + ((1 - c) * M.imag))
(_, eigvecs) = np.linalg.eigh(matrix)
eigvecs = np.array(eigvecs, dtype=complex)
eigvals = np.diag(((eigvecs.transpose() @ M) @ eigvecs))
reconstructed = ((eigvecs @ np.diag(eigvals)) @ eigvecs.transpose())
if np.allclose(M, reconstructed):
return (eigvals, eigvecs)
raise np.linalg.LinAlgError('Cannot diagonalize complex symmetric matrix.') | Diagonalize a complex symmetric matrix. The eigenvalues are
complex, and the eigenvectors form an orthogonal matrix.
Returns:
eigenvalues, eigenvectors | codesearchnet |
def construct(cls, name, version=None):
other = VersionedObject(None)
other.name_ = name
other.version_ = (Version() if (version is None) else version)
return other | Create a VersionedObject directly from an object name and version.
Args:
name: Object name string.
version: Version object. | codesearchnet |
def GetUpdateTimestamp(self):
return self._last_update_timestamp | Return last update timestamp of this map.
Returns:
An int containing seconds since epoch, or None. | github-repos |
def properties(lines):
results = {}
for (i, line) in enumerate(lines):
type_ = line[3:6]
if (type_ not in ['CHG', 'RAD', 'ISO']):
continue
count = int(line[6:9])
results[type_] = []
for j in range(count):
idx = int(line[(10 + (j * 8)):(13 + (j * 8))])
val = int(line[(14 + (j * 8)):(17 + (j * 8))])
results[type_].append((idx, val))
return results | Parse properties block
Returns:
dict: {property_type: (atom_index, value)} | codesearchnet |
def collect_changes(self):
file_diffs = self._collect_file_diffs()
(candidate_feature_diffs, valid_init_diffs, inadmissible_diffs) = self._categorize_file_diffs(file_diffs)
new_feature_info = self._collect_feature_info(candidate_feature_diffs)
return CollectedChanges(file_diffs, candidate_feature_diffs, valid_init_diffs, inadmissible_diffs, new_feature_info) | Collect file and feature changes
Steps
1. Collects the files that have changed in this pull request as
compared to a comparison branch.
2. Categorize these file changes into admissible or inadmissible file
changes. Admissible file changes solely contribute python files to
the contrib subdirectory.
3. Collect features from admissible new files.
Returns:
CollectedChanges | codesearchnet |
def _IsTestFilename(filename):
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False | Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise. | juraj-google-style |
def send_rpc_response(self, rpc_tag, result, response):
if (rpc_tag not in self.in_flight_rpcs):
raise ArgumentError('In flight RPC could not be found, it may have timed out', rpc_tag=rpc_tag)
del self.in_flight_rpcs[rpc_tag]
response_message = {'response': response, 'result': result}
try:
self.rpc_results.set(rpc_tag, response_message)
except KeyError:
self._logger.warning('RPC response came but no one was waiting: response=%s', response) | Send a response to an RPC.
Args:
rpc_tag (str): The exact string given in a previous call to send_rpc_command
result (str): The result of the operation. The possible values of response are:
service_not_found, rpc_not_found, timeout, success, invalid_response,
invalid_arguments, execution_exception
response (bytes): The raw bytes that we should send back as a response. | codesearchnet |
def _download_files(self, client, flow_id):
output_file_path = os.path.join(
self.output_path, '.'.join((flow_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
flow = client.Flow(flow_id)
file_archive = flow.GetFilesArchive()
file_archive.WriteToFile(output_file_path)
fqdn = client.data.os_info.fqdn.lower()
client_output_file = os.path.join(self.output_path, fqdn)
if not os.path.isdir(client_output_file):
os.makedirs(client_output_file)
with zipfile.ZipFile(output_file_path) as archive:
archive.extractall(path=client_output_file)
os.remove(output_file_path)
return client_output_file | Download files from the specified flow.
Args:
client: GRR Client object to which to download flow data from.
flow_id: GRR flow ID.
Returns:
str: path of downloaded files. | juraj-google-style |
def trigger_chain(self):
trigger_stream = self.allocator.attach_stream(self.trigger_stream)
return (trigger_stream, self.trigger_cond) | Return a NodeInput tuple for creating a node.
Returns:
(StreamIdentifier, InputTrigger) | codesearchnet |
def __autofill_form_data(self, form_data, elements):
for element in elements:
if not element["name"] in form_data:
continue
if not len(form_data[element["name"]]) is 0:
continue
if element.name == "textarea":
form_data[element["name"]] = RandomInputHelper.get_for_type("textarea")
continue
if element.has_attr("type"):
form_data[element["name"]] = RandomInputHelper.get_for_type(element["type"]) | Autofill empty form data with random data.
Args:
form_data (obj): The {key: value} form data
elements list(obj): Soup elements.
Returns:
obj: The {key: value} | juraj-google-style |
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:
batch_time, seq_length, hidden_size = hidden_states.size()
batch_size = batch_time
msg_token = self.message_fc(hidden_states[:, 0, :])
msg_token = msg_token.view(batch_size, self.num_frames, hidden_size)
msg_token = msg_token + self.drop_path(self.message_attn(self.message_ln(msg_token))[0])
msg_token = msg_token.view(-1, 1, hidden_size)
hidden_states = torch.cat([hidden_states, msg_token], dim=1)
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
hidden_states = hidden_states[:, :seq_length, :]
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs | Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail. | github-repos |
def gates_to_idx(gates, qregs):
sizes = [qr.size for qr in qregs.values()]
reg_idx = np.cumsum([0]+sizes)
regint = {}
for ind, qreg in enumerate(qregs.values()):
regint[qreg] = ind
out = np.zeros(2*len(gates), dtype=np.int32)
for idx, gate in enumerate(gates):
out[2*idx] = reg_idx[regint[gate[0][0]]]+gate[0][1]
out[2*idx+1] = reg_idx[regint[gate[1][0]]]+gate[1][1]
return out | Converts gate tuples into a nested list of integers.
Args:
gates (list): List of (QuantumRegister, int) pairs
representing gates.
qregs (dict): List of )QuantumRegister, int) tuples.
Returns:
list: Nested list of integers for gates. | juraj-google-style |
def _test_dir(temp_dir, test_name):
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir | Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory. | github-repos |
def __init__(self, *content: WritableTypes, style_files: Optional[Iterable[str]]=None, styles: Optional[Iterable[str]]=None, script_files: Optional[Iterable[str]]=None, scripts: Optional[Iterable[str]]=None) -> None:
super().__init__(*content, style_files=Html.StyleFiles(*(style_files or [])), styles=Html.Styles(*(styles or [])), script_files=Html.ScriptFiles(*(script_files or [])), scripts=Html.Scripts(*(scripts or []))) | Constructor.
Args:
*content: One or multiple body part (str, Html, lambda, None) of the HTML.
style_files: URLs for external styles to include.
styles: CSS styles to include.
script_files: URLs for external scripts to include.
scripts: JavaScript scripts to include. | github-repos |
def dismantle_graph(graph) -> None:
graph._functions.clear()
graph.Dismantle() | Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs. | github-repos |
def inception_resnet_v2(inputs, nb_classes=1001, is_training=True, dropout_keep_prob=0.8, reuse=None, scope='InceptionResnetV2', create_aux_logits=True, num_classes=None):
if (num_classes is not None):
warnings.warn('`num_classes` is deprecated. Switch to `nb_classes`. `num_classes` may be removed on or after 2019-04-23.')
nb_classes = num_classes
del num_classes
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, nb_classes], reuse=reuse) as var_scope:
with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
(net, end_points) = inception_resnet_v2_base(inputs, scope=var_scope)
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
aux = end_points['PreAuxLogits']
aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID', scope='Conv2d_1a_3x3')
aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
aux = slim.conv2d(aux, 768, aux.get_shape()[1:3], padding='VALID', scope='Conv2d_2a_5x5')
aux = slim.flatten(aux)
aux = slim.fully_connected(aux, nb_classes, activation_fn=None, scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID', scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='Dropout')
end_points['PreLogitsFlatten'] = net
logits = slim.fully_connected(net, nb_classes, activation_fn=None, scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return (logits, end_points) | Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
nb_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxilliary logits.
num_classes: depricated alias for nb_classes
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model. | codesearchnet |
def add_evolved_transformer_hparams(hparams):
hparams.num_encoder_layers = 3
hparams.num_decoder_layers = 4
hparams.learning_rate_constant /= hparams.learning_rate_warmup_steps ** 0.5
hparams.learning_rate_schedule = (
"constant*linear_warmup*single_cycle_cos_decay*rsqrt_hidden_size")
hparams.learning_rate_decay_steps = 250000
return hparams | Add Evolved Transformer hparams.
Note: These are for the Adam optimizer, not the Adafactor optimizer used in
the paper.
Args:
hparams: Current hparams.
Returns:
hparams updated with Evolved Transformer values. | juraj-google-style |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
mru_values_dict = {}
for subkey in registry_key.GetSubkeys():
username_value = subkey.GetValueByName('UsernameHint')
if (username_value and username_value.data and username_value.DataIsString()):
username = username_value.GetDataAsObject()
else:
username = 'N/A'
mru_values_dict[subkey.name] = username
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = subkey.path
event_data.offset = subkey.offset
event_data.regvalue = {'Username hint': username}
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = mru_values_dict
event_data.source_append = self._SOURCE_APPEND
event = time_events.DateTimeValuesEvent(registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts events from a Terminal Server Client Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key. | codesearchnet |
def add_arguments(cls, parser):
parser.add_argument(
'-as-api', '--asana-api',
action='store',
nargs='?',
const='',
dest='asana_api',
help="[setting] asana api key.",
)
parser.add_argument(
'-gh-api', '--github-api',
action='store',
nargs='?',
const='',
dest='github_api',
help="[setting] github api token.",
)
parser.add_argument(
'--first-issue',
type=int,
action='store',
nargs='?',
const='',
help="[setting] only sync issues [FIRST_ISSUE] and above"
) | Add arguments to the parser for collection in app.args.
Args:
parser:
`argparse.ArgumentParser`. Parser.
Arguments added here are server on
self.args. | juraj-google-style |
def get_workers_list(cluster_resolver):
worker_job_name = 'worker'
cluster_spec = cluster_resolver.cluster_spec()
if not cluster_spec:
raise errors.UnavailableError('None', 'None', 'Cluster spec not found, your client must run in GCE environment.')
task_indices = cluster_spec.task_indices(worker_job_name)
workers_list = [cluster_spec.task_address(worker_job_name, i).replace(':8470', ':8466') for i in task_indices]
return ','.join(workers_list) | Returns a comma separated list of TPU worker host:port pairs.
Gets cluster_spec from cluster_resolver. Use the worker's task indices to
obtain and return a list of host:port pairs.
Args:
cluster_resolver: TensorFlow TPUClusterResolver instance.
Returns:
A string of comma separated list of host:port pairs. For example:
'10.2.0.1:8466,10.2.0.2:8466,10.2.0.3:8466,10.2.0.4:8466'
Raises:
UnavailableError: cluster_resolver doesn't contain a valid cluster_spec. | github-repos |
def _save_tensor_value_to_tmp_cache(self, cache_idx, updates, graph):
updates = self._merge_tensor_signatures(updates)
updates = array_ops.reshape(updates, [self._num_signature_dimensions()])
if graph not in self._temp_cache_var:
raise RuntimeError('graph is not in self._temp_cache_var')
if cache_idx >= len(self._temp_cache_var[graph]):
raise RuntimeError('cache_idx (%d) is out of range (%d)' % (cache_idx, len(self._temp_cache_var[graph])))
self._temp_cache_var[graph][cache_idx] = updates | Returns an op that will save the given updates to an entry in the cache.
Args:
cache_idx: The cache index of the tensor within the cache.
updates: A dictionary of the signature updates from signature name to
a tensor of dimension [1].
graph: A TensorFlow graph.
Raises:
RuntimeError:
(1) graph is not already in self._temp_cache_var, or
(2) cache_idx is out of range. | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.