def buildTreeFromAlignment(filename,WorkingDir=None,SuppressStderr=None):
"""Builds a new tree from an existing alignment
filename: string, name of file containing the seqs or alignment
"""
app = Clustalw({'-tree':None,'-infile':filename},SuppressStderr=\
SuppressStderr,WorkingDir=WorkingDir)
app.Parameters['-align'].off()
    return app()
def simxGetIntegerParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
paramValue = ct.c_int()
    return c_GetIntegerParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode), paramValue.value
def partial(__fn, *a, **kw):
"""Wrap a note for injection of a partially applied function.
This allows for annotated functions to be injected for composition::
from jeni import annotate
@annotate('foo', bar=annotate.maybe('bar'))
def foobar(foo, bar=None):
return
@annotate('foo', annotate.partial(foobar))
def bazquux(foo, fn):
# fn: injector.partial(foobar)
return
Keyword arguments are treated as `maybe` when using partial, in order
to allow partial application of only the notes which can be provided,
where the caller could then apply arguments known to be unavailable in
the injector. Note that with Python 3 function annotations, all
annotations are injected as keyword arguments.
Injections on the partial function are lazy and not applied until the
injected partial function is called. See `eager_partial` to inject
eagerly.
"""
    return (PARTIAL, (__fn, a, tuple(kw.items())))
def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read.
"""
    try:
        return_val = self.handle.read(size)
    except OSError:
        print()
        print("Piksi disconnected")
        print()
        raise IOError
    if return_val == '':
        print()
        print("Piksi disconnected")
        print()
        raise IOError
    return return_val
def _start_app_and_connect(self):
"""Starts snippet apk on the device and connects to it.
After prechecks, this launches the snippet apk with an adb cmd in a
standing subprocess, checks the cmd response from the apk for protocol
version, then sets up the socket connection over adb port-forwarding.
    Raises:
ProtocolVersionError, if protocol info or port info cannot be
retrieved from the snippet apk.
"""
self._check_app_installed()
self.disable_hidden_api_blacklist()
persists_shell_cmd = self._get_persist_command()
# Use info here so people can follow along with the snippet startup
# process. Starting snippets can be slow, especially if there are
# multiple, and this avoids the perception that the framework is hanging
# for a long time doing nothing.
self.log.info('Launching snippet apk %s with protocol %d.%d',
self.package, _PROTOCOL_MAJOR_VERSION,
_PROTOCOL_MINOR_VERSION)
cmd = _LAUNCH_CMD % (persists_shell_cmd, self.package)
start_time = time.time()
self._proc = self._do_start_app(cmd)
# Check protocol version and get the device port
line = self._read_protocol_line()
match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line)
if not match or match.group(1) != '1':
raise ProtocolVersionError(self._ad, line)
line = self._read_protocol_line()
match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)
if not match:
raise ProtocolVersionError(self._ad, line)
self.device_port = int(match.group(1))
# Forward the device port to a new host port, and connect to that port
self.host_port = utils.get_available_host_port()
self._adb.forward(
['tcp:%d' % self.host_port,
'tcp:%d' % self.device_port])
self.connect()
# Yaaay! We're done!
self.log.debug('Snippet %s started after %.1fs on host port %s',
                   self.package, time.time() - start_time, self.host_port)
def unregister(self, gadgets):
"""
    Unregisters the specified gadget(s), if already registered.
"gadgets" can be a single class or a tuple/list of classes to unregister.
"""
gadgets = maintenance.ensure_list(gadgets)
for gadget in gadgets:
while gadget in self._registry:
            self._registry.remove(gadget)
def output(self):
"""!
@brief Returns output dynamic of the network.
"""
    if self.__ccore_legion_dynamic_pointer is not None:
        return wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer)
    return self.__output
def cors_wrapper(func):
"""
Decorator for CORS
:param func: Flask method that handles requests and returns a response
:return: Same, but with permissive CORS headers set
"""
def _setdefault(obj, key, value):
        if value is None:
return
obj.setdefault(key, value)
def output(*args, **kwargs):
response = func(*args, **kwargs)
headers = response.headers
_setdefault(headers, "Access-Control-Allow-Origin", "*")
_setdefault(headers, "Access-Control-Allow-Headers", flask.request.headers.get("Access-Control-Request-Headers"))
_setdefault(headers, "Access-Control-Allow-Methods", flask.request.headers.get("Access-Control-Request-Methods"))
_setdefault(headers, "Content-Type", "application/json")
_setdefault(headers, "Strict-Transport-Security", "max-age=31536000; includeSubDomains; preload")
return response
output.provide_automatic_options = False
output.__name__ = func.__name__
    return output
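# Usage sketch for cors_wrapper (hypothetical route, assuming a standard
# Flask app; not from the source). Apply it closest to the handler so the
# permissive CORS headers are set on every response the route produces:
import flask

app = flask.Flask(__name__)

@app.route('/api/data', methods=['GET', 'OPTIONS'])
@cors_wrapper
def get_data():
    return flask.jsonify({'status': 'ok'})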
def rename(name, new_name):
'''
Change the username for a named user
Args:
name (str): The user name to change
new_name (str): The new name for the current user
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.rename jsnuffy jshmoe
'''
if six.PY2:
name = _to_unicode(name)
new_name = _to_unicode(new_name)
# Load information for the current name
current_info = info(name)
if not current_info:
raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
# Look for an existing user with the new name
new_info = info(new_name)
if new_info:
raise CommandExecutionError(
'User \'{0}\' already exists'.format(new_name)
)
# Rename the user account
# Connect to WMI
with salt.utils.winapi.Com():
c = wmi.WMI(find_classes=0)
# Get the user object
try:
user = c.Win32_UserAccount(Name=name)[0]
except IndexError:
raise CommandExecutionError('User \'{0}\' does not exist'.format(name))
# Rename the user
result = user.Rename(new_name)[0]
# Check the result (0 means success)
    if result != 0:
# Define Error Dict
error_dict = {0: 'Success',
1: 'Instance not found',
2: 'Instance required',
3: 'Invalid parameter',
4: 'User not found',
5: 'Domain not found',
6: 'Operation is allowed only on the primary domain controller of the domain',
7: 'Operation is not allowed on the last administrative account',
8: 'Operation is not allowed on specified special groups: user, admin, local, or guest',
9: 'Other API error',
10: 'Internal error'}
raise CommandExecutionError(
'There was an error renaming \'{0}\' to \'{1}\'. Error: {2}'
.format(name, new_name, error_dict[result])
)
    return info(new_name).get('name') == new_name
def conditions(self):
"""The if-else pairs."""
for idx in six.moves.range(1, len(self.children), 2):
        yield (self.children[idx - 1], self.children[idx])
def matrix_mult_opt_order(M):
"""Matrix chain multiplication optimal order
:param M: list of matrices
:returns: matrices opt, arg, such that opt[i][j] is the optimal number of
operations to compute M[i] * ... * M[j] when done in the order
(M[i] * ... * M[k]) * (M[k + 1] * ... * M[j]) for k = arg[i][j]
    :complexity: :math:`O(n^3)`
"""
n = len(M)
r = [len(Mi) for Mi in M]
c = [len(Mi[0]) for Mi in M]
opt = [[0 for j in range(n)] for i in range(n)]
arg = [[None for j in range(n)] for i in range(n)]
for j_i in range(1, n): # loop on i, j of increasing j - i = j_i
for i in range(n - j_i):
j = i + j_i
opt[i][j] = float('inf')
for k in range(i, j):
alt = opt[i][k] + opt[k + 1][j] + r[i] * c[k] * c[j]
if opt[i][j] > alt:
opt[i][j] = alt
arg[i][j] = k
    return opt, arg
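# Worked example for matrix_mult_opt_order (a sketch, not from the source):
# for the chain A(10x20) B(20x5) C(5x15), splitting as (A*B)*C costs
# 10*20*5 + 10*5*15 = 1750 scalar operations, versus A*(B*C) at
# 20*5*15 + 10*20*15 = 4500, so the optimal split index arg[0][2] is 1.
A = [[0] * 20 for _ in range(10)]   # 10 x 20
B = [[0] * 5 for _ in range(20)]    # 20 x 5
C = [[0] * 15 for _ in range(5)]    # 5 x 15
opt, arg = matrix_mult_opt_order([A, B, C])
assert opt[0][2] == 1750 and arg[0][2] == 1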
def push_zipkin_attrs(zipkin_attr):
"""Stores the zipkin attributes to thread local.
.. deprecated::
Use the Tracer interface which offers better multi-threading support.
push_zipkin_attrs will be removed in version 1.0.
:param zipkin_attr: tuple containing zipkin related attrs
:type zipkin_attr: :class:`zipkin.ZipkinAttrs`
"""
from py_zipkin.storage import ThreadLocalStack
    log.warning('push_zipkin_attrs is deprecated. See DEPRECATIONS.rst for '
                'details on how to migrate to using Tracer.')
    return ThreadLocalStack().push(zipkin_attr)
def __Login(host, port, user, pwd, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC):
"""
Private method that performs the actual Connect and returns a
connected service instance object.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
"""
content, si, stub = __RetrieveContent(host, port, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout)
# Get a ticket if we're connecting to localhost and password is not specified
if host == 'localhost' and not pwd:
try:
(user, pwd) = GetLocalTicket(si, user)
except:
pass # This is not supported against vCenter, and connecting
# with an empty password is fine in debug builds
# Login
try:
x = content.sessionManager.Login(user, pwd, None)
except vim.fault.InvalidLogin:
raise
except Exception as e:
raise
    return si, stub
def read_parquet(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a DataFrame.
Ray DataFrame only supports pyarrow engine for now.
Args:
path: The filepath of the parquet file.
We only support local files for now.
        engine: Ray only supports the pyarrow reader.
This argument doesn't do anything for now.
kwargs: Pass into parquet's read_pandas function.
Notes:
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile
if cls.read_parquet_remote_task is None:
return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs)
if not columns:
pf = ParquetFile(path)
columns = [
name
for name in pf.metadata.schema.names
if not PQ_INDEX_REGEX.match(name)
]
num_partitions = cls.frame_mgr_cls._compute_num_partitions()
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
# Each item in this list will be a list of columns of original df
# partitioned to smaller pieces along rows.
# We need to transpose the oids array to fit our schema.
blk_partitions = np.array(
[
cls.read_parquet_remote_task._remote(
args=(path, cols, num_splits, kwargs),
num_return_vals=num_splits + 1,
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[cls.frame_partition_cls(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(remote_partitions), index, columns
)
    return new_query_compiler
def search_text(self, text_cursor, search_txt, search_flags):
"""
Searches a text in a text document.
:param text_cursor: Current text cursor
:param search_txt: Text to search
:param search_flags: QTextDocument.FindFlags
:returns: the list of occurrences, the current occurrence index
:rtype: tuple([], int)
"""
def compare_cursors(cursor_a, cursor_b):
"""
Compares two QTextCursor
:param cursor_a: cursor a
:param cursor_b: cursor b
        :returns: True if cursor_b's selection is contained within
            cursor_a's selection
"""
return (cursor_b.selectionStart() >= cursor_a.selectionStart() and
cursor_b.selectionEnd() <= cursor_a.selectionEnd())
text_document = self._editor.document()
occurrences = []
index = -1
cursor = text_document.find(search_txt, 0, search_flags)
original_cursor = text_cursor
while not cursor.isNull():
if compare_cursors(cursor, original_cursor):
index = len(occurrences)
occurrences.append((cursor.selectionStart(),
cursor.selectionEnd()))
cursor.setPosition(cursor.position() + 1)
cursor = text_document.find(search_txt, cursor, search_flags)
    return occurrences, index
def disable_ipv6():
"""
Disable ufw IPv6 support in /etc/default/ufw
"""
exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
'/etc/default/ufw'])
if exit_code == 0:
hookenv.log('IPv6 support in ufw disabled', level='INFO')
else:
hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
        raise UFWError("Couldn't disable IPv6 support in ufw")
def result(self, state, row):
"Place the next queen at the given row."
col = state.index(None)
new = state[:]
new[col] = row
    return new
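# Quick sketch of `result` above (it is a method, but `self` is unused, so
# we can exercise it directly for illustration -- hypothetical, not from the
# source). Columns fill left to right; the first None marks the next column:
state = [0, None, None]                        # a queen already in column 0
assert result(None, state, 2) == [0, 2, None]  # next queen: column 1, row 2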
def get_attrs(self, *names):
"""Get multiple attributes from multiple objects."""
attrs = [getattr(self, name) for name in names]
    return attrs
def is_special_orthogonal(
matrix: np.ndarray,
*,
rtol: float = 1e-5,
atol: float = 1e-8) -> bool:
"""Determines if a matrix is approximately special orthogonal.
A matrix is special orthogonal if it is square and real and its transpose
is its inverse and its determinant is one.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is special orthogonal within the given tolerance.
"""
return (is_orthogonal(matrix, rtol=rtol, atol=atol) and
(matrix.shape[0] == 0 or
             np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol)))
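# Quick check (a sketch, assuming this module's is_orthogonal is in scope):
# a 2-D rotation matrix is special orthogonal, while a reflection (det = -1)
# is orthogonal but not special.
import numpy as np

theta = 0.3
rotation = np.array([[np.cos(theta), -np.sin(theta)],
                     [np.sin(theta), np.cos(theta)]])
reflection = np.array([[1.0, 0.0], [0.0, -1.0]])
assert is_special_orthogonal(rotation)
assert not is_special_orthogonal(reflection)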
def add_patches(self, patches, after=None):
""" Add a list of patches to the patches list """
if after is None:
self.insert_patches(patches)
else:
self._check_patch(after)
patchlines = self._patchlines_before(after)
patchlines.append(self.patch2line[after])
for patch in patches:
patchline = PatchLine(patch)
patchlines.append(patchline)
self.patch2line[patchline.get_patch()] = patchline
patchlines.extend(self._patchlines_after(after))
        self.patchlines = patchlines
def quadrature_weights(Ntheta):
"""Fourier-space weights needed to evaluate I_{mm'}
This is mostly an internal function, included here for backwards compatibility. See map2salm
and salm2map for more useful functions.
"""
import numpy as np
weights = np.empty(2*(Ntheta-1), dtype=np.complex128)
_quadrature_weights(Ntheta, weights)
    return weights
def prepare_service(data):
"""Prepare service for catalog endpoint
Parameters:
data (Union[str, dict]): Service ID or service definition
Returns:
Tuple[str, dict]: str is ID and dict is service
Transform ``/v1/health/state/<state>``::
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis1",
"ServiceName": "redis"
}
to::
{
"ID": "redis1",
"Service": "redis"
}
Extract from /v1/health/service/<service>::
{
"Node": {...},
"Service": {
"ID": "redis1",
"Service": "redis",
"Tags": None,
"Address": "10.1.10.12",
"Port": 8000
},
"Checks": [...]
}
"""
if not data:
return None, {}
if isinstance(data, str):
return data, {}
# from /v1/health/service/<service>
if all(field in data for field in ("Node", "Service", "Checks")):
return data["Service"]["ID"], data["Service"]
# from /v1/health/checks/<service>
# from /v1/health/node/<node>
# from /v1/health/state/<state>
# from /v1/catalog/service/<service>
if all(field in data for field in ("ServiceName", "ServiceID")):
return data["ServiceID"], {
"ID": data["ServiceID"],
"Service": data["ServiceName"],
"Tags": data.get("ServiceTags"),
"Address": data.get("ServiceAddress"),
"Port": data.get("ServicePort"),
}
if list(data) == ["ID"]:
return data["ID"], {}
result = {}
if "Name" in data:
result["Service"] = data["Name"]
for k in ("Service", "ID", "Tags", "Address", "Port"):
if k in data:
result[k] = data[k]
return result.get("ID"), result | Prepare service for catalog endpoint
Parameters:
data (Union[str, dict]): Service ID or service definition
Returns:
Tuple[str, dict]: str is ID and dict is service
Transform ``/v1/health/state/<state>``::
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis1",
"ServiceName": "redis"
}
to::
{
"ID": "redis1",
"Service": "redis"
}
Extract from /v1/health/service/<service>::
{
"Node": {...},
"Service": {
"ID": "redis1",
"Service": "redis",
"Tags": None,
"Address": "10.1.10.12",
"Port": 8000
},
"Checks": [...]
} |
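# A quick sketch of the two main input shapes for prepare_service (payload
# adapted from the docstring example above):
health_state_entry = {
    "Node": "foobar",
    "CheckID": "service:redis",
    "Name": "Service 'redis' check",
    "Status": "passing",
    "ServiceID": "redis1",
    "ServiceName": "redis",
}
assert prepare_service(health_state_entry) == (
    "redis1",
    {"ID": "redis1", "Service": "redis", "Tags": None, "Address": None,
     "Port": None},
)
# A bare service ID passes through unchanged:
assert prepare_service("redis1") == ("redis1", {})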
def extract_library_properties_from_selected_row(self):
""" Extracts properties library_os_path, library_path, library_name and tree_item_key from tree store row """
(model, row) = self.view.get_selection().get_selected()
tree_item_key = model[row][self.ID_STORAGE_ID]
library_item = model[row][self.ITEM_STORAGE_ID]
library_path = model[row][self.LIB_PATH_STORAGE_ID]
if isinstance(library_item, dict): # sub-tree
os_path = model[row][self.OS_PATH_STORAGE_ID]
return os_path, None, None, tree_item_key # relevant elements of sub-tree
assert isinstance(library_item, string_types)
library_os_path = library_item
library_name = library_os_path.split(os.path.sep)[-1]
    return library_os_path, library_path, library_name, tree_item_key
def custom_layouts_menu(self, value):
"""
Setter for **self.__custom_layouts_menu** attribute.
:param value: Attribute value.
:type value: QMenu
"""
if value is not None:
assert type(value) is QMenu, "'{0}' attribute: '{1}' type is not 'QMenu'!".format(
"custom_layouts_menu", value)
        self.__custom_layouts_menu = value
def _get_common_block_structure(lhs_bs, rhs_bs):
"""For two block structures ``aa = (a1, a2, ..., an)``, ``bb = (b1, b2,
..., bm)`` generate the maximal common block structure so that every block
from aa and bb is contained in exactly one block of the resulting
structure. This is useful for determining how to apply the distributive
law when feeding two concatenated Circuit objects into each other.
Examples:
``(1, 1, 1), (2, 1) -> (2, 1)``
``(1, 1, 2, 1), (2, 1, 2) -> (2, 3)``
Args:
lhs_bs (tuple): first block structure
rhs_bs (tuple): second block structure
"""
# for convenience the arguments may also be Circuit objects
if isinstance(lhs_bs, Circuit):
lhs_bs = lhs_bs.block_structure
if isinstance(rhs_bs, Circuit):
rhs_bs = rhs_bs.block_structure
if sum(lhs_bs) != sum(rhs_bs):
raise IncompatibleBlockStructures(
'Blockstructures have different total channel numbers.')
if len(lhs_bs) == len(rhs_bs) == 0:
return ()
i = j = 1
lsum = 0
while True:
lsum = sum(lhs_bs[:i])
rsum = sum(rhs_bs[:j])
if lsum < rsum:
i += 1
elif rsum < lsum:
j += 1
else:
break
    return (lsum, ) + _get_common_block_structure(lhs_bs[i:], rhs_bs[j:])
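# The docstring examples, as they evaluate inside this module (calling the
# function standalone would need `Circuit` and `IncompatibleBlockStructures`
# in scope):
#
#     _get_common_block_structure((1, 1, 1), (2, 1))        # -> (2, 1)
#     _get_common_block_structure((1, 1, 2, 1), (2, 1, 2))  # -> (2, 3)
#
# Each output block is the smallest span covering whole blocks on both
# sides: in the second example, 1 + 1 = 2 matches the left-hand 2, and
# 2 + 1 = 3 matches the right-hand 1 + 2.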
def sanitize_http_request_cookies(client, event):
"""
Sanitizes http request cookies
:param client: an ElasticAPM client
:param event: a transaction or error event
:return: The modified event
"""
# sanitize request.cookies dict
try:
cookies = event["context"]["request"]["cookies"]
event["context"]["request"]["cookies"] = varmap(_sanitize, cookies)
except (KeyError, TypeError):
pass
# sanitize request.header.cookie string
try:
cookie_string = event["context"]["request"]["headers"]["cookie"]
event["context"]["request"]["headers"]["cookie"] = _sanitize_string(cookie_string, "; ", "=")
except (KeyError, TypeError):
pass
    return event
def find_expected_error(self, delta_params='calc'):
"""
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
"""
grad = self.calc_grad()
if list(delta_params) in [list('calc'), list('perfect')]:
jtj = (self.JTJ if delta_params == 'perfect' else
self._calc_damped_jtj(self.JTJ))
delta_params = self._calc_lm_step(jtj, self.calc_grad())
#If the model were linear, then the cost would be quadratic,
#with Hessian 2*`self.JTJ` and gradient `grad`
expected_error = (self.error + np.dot(grad, delta_params) +
np.dot(np.dot(self.JTJ, delta_params), delta_params))
    return expected_error
def slugify(text, delim='-'):
"""Generate an ASCII-only slug."""
result = []
    for word in _punct_re.split((text or '').lower()):
        result.extend(codecs.encode(word, 'ascii', 'replace').decode('ascii').split())
    return delim.join(result)
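# Example (a sketch; `_punct_re` is assumed to split on punctuation and
# whitespace, as is conventional for this recipe):
#
#     slugify("Hello, World!")       # -> 'hello-world'
#     slugify("A/B testing", '_')    # -> 'a_b_testing'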
def split_log(logf):
"""split concat log into individual samples"""
flashpatt = re.compile(
r'\[FLASH\] Fast Length Adjustment of SHort reads\n(.+?)\[FLASH\] FLASH', flags=re.DOTALL)
    return flashpatt.findall(logf)
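# A tiny synthetic example for split_log (hypothetical log text, not from
# the source): each sample's block sits between a FLASH banner and the next
# "[FLASH] FLASH" version line.
log_text = (
    "[FLASH] Fast Length Adjustment of SHort reads\n"
    "stats for sample 1\n"
    "[FLASH] FLASH v1.2.11 complete\n"
    "[FLASH] Fast Length Adjustment of SHort reads\n"
    "stats for sample 2\n"
    "[FLASH] FLASH v1.2.11 complete\n"
)
assert split_log(log_text) == ["stats for sample 1\n", "stats for sample 2\n"]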
def wavfile_to_examples(wav_file):
"""Convenience wrapper around waveform_to_examples() for a common WAV format.
Args:
wav_file: String path to a file, or a file-like object. The file
is assumed to contain WAV audio data with signed 16-bit PCM samples.
Returns:
See waveform_to_examples.
"""
from scipy.io import wavfile
sr, wav_data = wavfile.read(wav_file)
assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]
    return waveform_to_examples(samples, sr)
def get_files_by_layer(self, layer_name, file_pattern='*'):
"""
returns a list of all files with the given filename pattern in the
given PCC annotation layer
"""
layer_path = os.path.join(self.path, layer_name)
    return list(dg.find_files(layer_path, file_pattern))
def delete_network_resource_property_entry(resource, prop):
""" Factory method for creating delete functions. """
def delete_func(cmd, resource_group_name, resource_name, item_name, no_wait=False): # pylint: disable=unused-argument
client = getattr(network_client_factory(cmd.cli_ctx), resource)
item = client.get(resource_group_name, resource_name)
keep_items = \
[x for x in item.__getattribute__(prop) if x.name.lower() != item_name.lower()]
_set_param(item, prop, keep_items)
if no_wait:
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, resource_name, item)
else:
result = sdk_no_wait(no_wait, client.create_or_update, resource_group_name, resource_name, item).result()
if next((x for x in getattr(result, prop) if x.name.lower() == item_name.lower()), None):
raise CLIError("Failed to delete '{}' on '{}'".format(item_name, resource_name))
func_name = 'delete_network_resource_property_entry_{}_{}'.format(resource, prop)
setattr(sys.modules[__name__], func_name, delete_func)
    return func_name
def relations():
"""Get a nested dictionary of relation data for all related units"""
rels = {}
for reltype in relation_types():
relids = {}
for relid in relation_ids(reltype):
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
for unit in related_units(relid):
reldata = relation_get(unit=unit, rid=relid)
units[unit] = reldata
relids[relid] = units
rels[reltype] = relids
    return rels
def iqi(ql, qs, ns=None, rc=None, ot=None, coe=None,
moc=DEFAULT_ITER_MAXOBJECTCOUNT,):
# pylint: disable=line-too-long
"""
*New in pywbem 0.10 as experimental and finalized in 0.12.*
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.IterQueryInstances`.
Execute a query in a namespace,
using the corresponding pull operations if supported by the WBEM server
or otherwise the corresponding traditional operation, and using the
Python :term:`py:generator` idiom to return the result.
This method is an alternative to using the pull operations directly,
that frees the user of having to know whether the WBEM server supports
pull operations.
Other than the other i...() functions, this function does not return
a generator object directly, but as a property of the returned object.
Parameters:
ql (:term:`string`):
Name of the query language used in the `qs` parameter, e.g.
"DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
Language. Because this is not a filter query, "DMTF:FQL" is not a
valid query language for this request.
qs (:term:`string`):
Query string in the query language specified in the `ql` parameter.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the default namespace of the connection.
rc (:class:`py:bool`):
Controls whether a class definition describing the properties of the
returned instances will be returned.
`None` will cause the server to use its default of `False`.
ot (:class:`~pywbem.Uint32`):
Operation timeout in seconds. This is the minimum time the WBEM server
must keep the enumeration session open between requests on that
session.
A value of 0 indicates that the server should never time out.
The server may reject the proposed value.
`None` will cause the server to use its default timeout.
coe (:class:`py:bool`):
Continue on error flag.
`None` will cause the server to use its default of `False`.
moc (:class:`~pywbem.Uint32`):
Maximum number of instances the WBEM server may return for each of
the open and pull requests issued during the iterations over the
returned generator object.
Zero and `None` are not allowed.
Returns:
:class:`~pywbem.IterQueryInstancesReturn`: An object with the
following properties:
* **query_result_class** (:class:`~pywbem.CIMClass`):
The query result class, if requested via the `rc` parameter.
`None`, if a query result class was not requested.
* **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`):
A generator object that iterates the CIM instances representing the
query result. These instances do not have an instance path set.
""" # noqa: E501
return CONN.IterQueryInstances(FilterQueryLanguage=ql,
FilterQuery=qs,
namespace=ns,
ReturnQueryResultClass=rc,
OperationTimeout=ot,
ContinueOnError=coe,
                                   MaxObjectCount=moc)
def username_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
role = ET.SubElement(username, "role")
role.text = kwargs.pop('role')
callback = kwargs.pop('callback', self._callback)
    return callback(config)
def update_sg(self, context, sg, rule_id, action):
"""Begins the async update process."""
db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE)
if not db_sg:
return None
with context.session.begin():
job_body = dict(action="%s sg rule %s" % (action, rule_id),
resource_id=rule_id,
tenant_id=db_sg['tenant_id'])
job_body = dict(job=job_body)
job = job_api.create_job(context.elevated(), job_body)
rpc_client = QuarkSGAsyncProducerClient()
try:
rpc_client.populate_subtasks(context, sg, job['id'])
except om_exc.MessagingTimeout:
LOG.error("Failed to create subtasks. Rabbit running?")
return None
return {"job_id": job['id']} | Begins the async update process. |
def make_request(self, action, body='', object_hook=None):
"""
:raises: ``DynamoDBExpiredTokenError`` if the security token expires.
"""
headers = {'X-Amz-Target' : '%s_%s.%s' % (self.ServiceName,
self.Version, action),
'Content-Type' : 'application/x-amz-json-1.0',
'Content-Length' : str(len(body))}
http_request = self.build_base_http_request('POST', '/', '/',
{}, headers, body, None)
response = self._mexe(http_request, sender=None,
override_num_retries=10,
retry_handler=self._retry_handler)
response_body = response.read()
boto.log.debug(response_body)
    return json.loads(response_body, object_hook=object_hook)
def copy(self, target=None, name=None):
""" Asynchronously creates a copy of this DriveItem and all it's
child elements.
:param target: target location to move to.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or Drive
:param name: a new name for the copy.
:rtype: CopyOperation
"""
if target is None and name is None:
raise ValueError('Must provide a target or a name (or both)')
if isinstance(target, Folder):
target_id = target.object_id
drive_id = target.drive_id
elif isinstance(target, Drive):
# we need the root folder
root_folder = target.get_root_folder()
if not root_folder:
return None
target_id = root_folder.object_id
drive_id = root_folder.drive_id
elif target is None:
target_id = None
drive_id = None
else:
raise ValueError('Target, if provided, must be a Folder or Drive')
if not self.object_id:
return None
if target_id == 'root':
raise ValueError("When copying, target id can't be 'root'")
url = self.build_url(
self._endpoints.get('copy').format(id=self.object_id))
if target_id and drive_id:
data = {'parentReference': {'id': target_id, 'driveId': drive_id}}
else:
data = {}
if name:
# incorporate the extension if the name provided has none.
if not Path(name).suffix and self.name:
name = name + Path(self.name).suffix
data['name'] = name
response = self.con.post(url, data=data)
if not response:
return None
# Find out if the server has run a Sync or Async operation
    location = response.headers.get('Location', None)
    if location is None:
        return None
    if 'monitor' in location:
# Async operation
return CopyOperation(parent=self.drive, monitor_url=location)
else:
# Sync operation. Item is ready to be retrieved
path = urlparse(location).path
item_id = path.split('/')[-1]
        return CopyOperation(parent=self.drive, item_id=item_id)
def _clear_zones(self, zone):
"""
Clear all expired zones from our status list.
:param zone: current zone being processed
:type zone: int
"""
cleared_zones = []
found_last_faulted = found_current = at_end = False
# First pass: Find our start spot.
it = iter(self._zones_faulted)
try:
while not found_last_faulted:
z = next(it)
if z == self._last_zone_fault:
found_last_faulted = True
break
except StopIteration:
at_end = True
# Continue until we find our end point and add zones in
# between to our clear list.
try:
while not at_end and not found_current:
z = next(it)
if z == zone:
found_current = True
break
else:
cleared_zones += [z]
except StopIteration:
pass
# Second pass: roll through the list again if we didn't find
# our end point and remove everything until we do.
if not found_current:
it = iter(self._zones_faulted)
try:
while not found_current:
z = next(it)
if z == zone:
found_current = True
break
else:
cleared_zones += [z]
except StopIteration:
pass
# Actually remove the zones and trigger the restores.
for z in cleared_zones:
        self._update_zone(z, Zone.CLEAR)
def get_box_office_films(self):
"""uses a certain cinema (O2) and a certain day when non specialist films show (Wednesday) to get a list of the latest box office films"""
today = datetime.date.today()
next_wednesday = (today + datetime.timedelta((2 - today.weekday()) % 7)).strftime('%Y%m%d')
    films = self.get_films(cinema=79, date=next_wednesday)
    films = [film for film in films if '3D' not in film['title']]
    for film in films:
        if '2D -' in film['title']:
            film['title'] = film['title'][5:]
    return films
def generate_encodeable_characters(characters: Iterable[str],
encodings: Iterable[str]) -> Iterable[str]:
"""Generates the subset of 'characters' that can be encoded by 'encodings'.
Args:
characters: The characters to check for encodeability e.g. 'abcd'.
encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5'].
Returns:
The subset of 'characters' that can be encoded using one of the provided
encodings.
"""
for c in characters:
for encoding in encodings:
            try:
                c.encode(encoding)
                yield c
                break  # yield each encodeable character only once
            except UnicodeEncodeError:
                pass
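# Example: keep only the characters representable in ASCII or ISO-8859-5
# (Cyrillic); the snowman can be encoded by neither.
assert ''.join(generate_encodeable_characters('abжд☃', ['ascii', 'iso-8859-5'])) == 'abжд'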
def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False):
""" Retrieve a text node from the API
:param textId: PrototypeText Identifier
:type textId: str
:param subreference: Passage Reference
:type subreference: str
:param prevnext: Retrieve graph representing previous and next passage
:type prevnext: boolean
:param metadata: Retrieve metadata about the passage and the text
:type metadata: boolean
:return: Passage
:rtype: Passage
"""
key = _cache_key("Nautilus", self.name, "Passage", textId, subreference)
o = self.cache.get(key)
if o is not None:
return o
text, text_metadata = self.__getText__(textId)
if subreference is not None:
subreference = Reference(subreference)
passage = text.getTextualNode(subreference)
passage.set_metadata_from_collection(text_metadata)
self.cache.set(key, passage)
    return passage
def _get_user_info(self, cmd, section, required=True,
accept_just_who=False):
"""Parse a user section."""
line = self.next_line()
if line.startswith(section + b' '):
return self._who_when(line[len(section + b' '):], cmd, section,
accept_just_who=accept_just_who)
elif required:
self.abort(errors.MissingSection, cmd, section)
else:
self.push_line(line)
        return None
def milestones(self):
'''Array of all milestones'''
if self.cache['milestones']: return self.cache['milestones']
milestone_xml = self.bc.list_milestones(self.id)
milestones = []
for node in ET.fromstring(milestone_xml).findall("milestone"):
milestones.append(Milestone(node))
milestones.sort()
milestones.reverse()
self.cache['milestones'] = milestones
    return self.cache['milestones']
def sub_description(self):
"""Time and space dscription"""
gd = self.geo_description
td = self.time_description
if gd and td:
return '{}, {}. {} Rows.'.format(gd, td, self._p.count)
elif gd:
return '{}. {} Rows.'.format(gd, self._p.count)
elif td:
return '{}. {} Rows.'.format(td, self._p.count)
else:
        return '{} Rows.'.format(self._p.count)
def refs_to(cls, sha1, repo):
"""Returns all refs pointing to the given SHA1."""
matching = []
for refname in repo.listall_references():
symref = repo.lookup_reference(refname)
dref = symref.resolve()
oid = dref.target
commit = repo.get(oid)
if commit.hex == sha1:
matching.append(symref.shorthand)
    return matching
def _symbol_bars(
self,
symbols,
size,
_from=None,
to=None,
limit=None):
'''
Query historic_agg either minute or day in parallel
for multiple symbols, and return in dict.
symbols: list[str]
size: str ('day', 'minute')
_from: str or pd.Timestamp
to: str or pd.Timestamp
limit: str or int
return: dict[str -> pd.DataFrame]
'''
assert size in ('day', 'minute')
# temp workaround for less bars after masking by
# market hours
query_limit = limit
if query_limit is not None:
query_limit *= 2
@skip_http_error((404, 504))
def fetch(symbol):
df = self._api.polygon.historic_agg(
size, symbol, _from, to, query_limit).df
# zipline -> right label
# API result -> left label (beginning of bucket)
if size == 'minute':
df.index += pd.Timedelta('1min')
# mask out bars outside market hours
mask = self._cal.minutes_in_range(
df.index[0], df.index[-1],
).tz_convert(NY)
df = df.reindex(mask)
if limit is not None:
df = df.iloc[-limit:]
return df
    return parallelize(fetch)(symbols)
def to(location, code=falcon.HTTP_302):
"""Redirects to the specified location using the provided http_code (defaults to HTTP_302 FOUND)"""
    raise falcon.http_status.HTTPStatus(code, {'location': location})
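# Usage sketch for `to` inside a Falcon responder (hypothetical resource,
# not from the source): raising HTTPStatus short-circuits the request with
# a 302 pointing at the new path.
import falcon

class OldEndpoint(object):
    def on_get(self, req, resp):
        to('/v2/endpoint')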
def determine_elected_candidates_in_order(self, candidate_votes):
"""
determine all candidates with at least a quota of votes in `candidate_votes'. returns results in
order of decreasing vote count. Any ties are resolved within this method.
"""
eligible_by_vote = defaultdict(list)
for candidate_id, votes in candidate_votes.candidate_votes_iter():
if candidate_id in self.candidates_elected:
continue
if votes < self.quota:
continue
eligible_by_vote[votes].append(candidate_id)
elected = []
for votes in reversed(sorted(eligible_by_vote)):
candidate_ids = eligible_by_vote[votes]
# we sort here to ensure stability, so external callers can hard-coded their response
candidate_ids.sort(key=self.candidate_order_fn)
if len(candidate_ids) == 1:
elected.append(candidate_ids[0])
else:
tie_breaker_round = self.find_tie_breaker(candidate_ids)
if tie_breaker_round is not None:
self.results.provision_used(
ActProvision("Multiple candidates elected with %d votes. Tie broken from previous totals." % (votes)))
for candidate_id in reversed(sorted(candidate_ids, key=tie_breaker_round.get_vote_count)):
elected.append(candidate_id)
else:
self.results.provision_used(
ActProvision("Multiple candidates elected with %d votes. Input required from Australian Electoral Officer." % (votes)))
permutations = list(itertools.permutations(candidate_ids))
permutations.sort()
choice = self.resolve_election_order(permutations)
for candidate_id in permutations[choice]:
elected.append(candidate_id)
        return elected
def _copy(query_dict):
"""
Return a mutable copy of `query_dict`. This is a workaround to
Django bug #13572, which prevents QueryDict.copy from working.
"""
memo = { }
result = query_dict.__class__('',
encoding=query_dict.encoding,
mutable=True)
memo[id(query_dict)] = result
for key, value in dict.items(query_dict):
dict.__setitem__(result,
copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
    return result
def print_result(result):
"""Print the result, ascii encode if necessary"""
try:
print result
except UnicodeEncodeError:
if sys.stdout.encoding:
print result.encode(sys.stdout.encoding, 'replace')
else:
print result.encode('utf8')
except:
print "Unexpected error attempting to print result" | Print the result, ascii encode if necessary |
def open_file(filename, file_mode='w'):
"""
A static convenience function that performs the open of the recorder
file correctly for different versions of Python.
*New in pywbem 0.10.*
This covers the issue where the file should be opened in text mode but
that is done differently in Python 2 and Python 3.
The returned file-like object must be closed by the caller.
Parameters:
filename(:term:`string`):
Name of the file where the recorder output will be written
file_mode(:term:`string`):
Optional file mode. The default is 'w' which overwrites any
existing file. if 'a' is used, the data is appended to any
existing file.
Returns:
File-like object.
Example::
recorder = TestClientRecorder(...)
recorder_file = recorder.open_file('recorder.log')
. . . # Perform WBEM operations using the recorder
recorder_file.close()
"""
if six.PY2:
# Open with codecs to define text mode
return codecs.open(filename, mode=file_mode, encoding='utf-8')
    return open(filename, file_mode, encoding='utf8')
def named_side_effect(original, name):
"""Decorator for function or method that do not modify the recorder state
but have some side effects that can't be replayed.
What it does in recording mode is keep the function name, arguments,
keyword and result as a side effect that will be recorded in the journal.
In replay mode, it will only pop the next expected side-effect, verify
the function name, arguments and keywords and return the expected result
without executing the real function code. If the function name, arguments
or keywords were to be different than the expected ones, it would raise
L{ReplayError}. Should work for any function or method."""
def wrapper(callable, *args, **kwargs):
return _side_effect_wrapper(callable, args, kwargs, name)
    return wrapper
def vals(cls):
"""Return this class's attribute values (those not stating with '_').
Returns
-------
_vals : list of objects
        List of values of internal attributes. Order is effectively random.
"""
if cls._vals:
return cls._vals
# If `_vals` is not yet defined, create it
# ----------------------------------------
_vals = []
    # get the keys from all base-classes as well (when this is subclassed)
for mro in cls.__bases__:
        # base classes below `KeyCollection` (e.g. `object`) won't work
if issubclass(mro, KeyCollection):
_vals.extend(mro.vals())
# Get the keys from this particular subclass
# Only non-hidden (no '_') and variables (non-callable)
_vals.extend([
vv for kk, vv in vars(cls).items()
if not kk.startswith('_') and not callable(getattr(cls, kk))
])
# Store for future retrieval
cls._vals = _vals
    return cls._vals
def find_all_valid_decks(provider: Provider, deck_version: int,
prod: bool=True) -> Generator:
'''
Scan the blockchain for PeerAssets decks, returns list of deck objects.
: provider - provider instance
: version - deck protocol version (0, 1, 2, ...)
: test True/False - test or production P2TH
'''
pa_params = param_query(provider.network)
if prod:
p2th = pa_params.P2TH_addr
else:
p2th = pa_params.test_P2TH_addr
if isinstance(provider, RpcNode):
deck_spawns = (provider.getrawtransaction(i, 1)
for i in find_deck_spawns(provider))
else:
try:
deck_spawns = (provider.getrawtransaction(i, 1) for i in
provider.listtransactions(p2th))
except TypeError as err: # it will except if no transactions are found on this P2TH
raise EmptyP2THDirectory(err)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as th:
for result in th.map(deck_parser, ((provider, rawtx, deck_version, p2th) for rawtx in deck_spawns)):
if result:
yield result | Scan the blockchain for PeerAssets decks, returns list of deck objects.
: provider - provider instance
: version - deck protocol version (0, 1, 2, ...)
: test True/False - test or production P2TH |
def user_parser(user):
"""
Parses a user object
"""
if __is_deleted(user):
return deleted_parser(user)
if user['id'] in item_types:
raise Exception('Not a user name')
    if isinstance(user['id'], int):
raise Exception('Not a user name')
return User(
user['id'],
user['delay'],
user['created'],
user['karma'],
user['about'],
user['submitted'],
    )
def wait_with_ioloop(self, ioloop, timeout=None):
"""Do blocking wait until condition is event is set.
Parameters
----------
    ioloop : tornado.ioloop.IOLoop instance
MUST be the same ioloop that set() / clear() is called from
timeout : float, int or None
If not None, only wait up to `timeout` seconds for event to be set.
Return Value
------------
flag : True if event was set within timeout, otherwise False.
Notes
-----
This will deadlock if called in the ioloop!
"""
f = Future()
def cb():
return gen.chain_future(self.until_set(), f)
ioloop.add_callback(cb)
try:
f.result(timeout)
return True
except TimeoutError:
        return self._flag
def capabilities(self, width, height, rotate, mode="1"):
"""
Assigns attributes such as ``width``, ``height``, ``size`` and
``bounding_box`` correctly oriented from the supplied parameters.
:param width: The device width.
:type width: int
:param height: The device height.
:type height: int
:param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is
no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
represents 270° rotation.
:type rotate: int
:param mode: The supported color model, one of ``"1"``, ``"RGB"`` or
``"RGBA"`` only.
:type mode: str
"""
assert mode in ("1", "RGB", "RGBA")
assert rotate in (0, 1, 2, 3)
self._w = width
self._h = height
self.width = width if rotate % 2 == 0 else height
self.height = height if rotate % 2 == 0 else width
self.size = (self.width, self.height)
self.bounding_box = (0, 0, self.width - 1, self.height - 1)
self.rotate = rotate
self.mode = mode
self.persist = False | Assigns attributes such as ``width``, ``height``, ``size`` and
``bounding_box`` correctly oriented from the supplied parameters.
:param width: The device width.
:type width: int
:param height: The device height.
:type height: int
:param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is
no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
represents 270° rotation.
:type rotate: int
:param mode: The supported color model, one of ``"1"``, ``"RGB"`` or
``"RGBA"`` only.
:type mode: str |
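A quick check of the rotation handling, assuming capabilities above is in scope as a plain function and using a bare object to carry the attributes (the real method lives on a display device class):

class _Dev:
    pass

d = _Dev()
capabilities(d, 128, 64, rotate=1)
print(d.size)          # (64, 128): a 90-degree rotation swaps the axes
print(d.bounding_box)  # (0, 0, 63, 127)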
def __generate_method(name):
"""
Wraps the DataFrame's original method by name to return the derived class instance.
"""
try:
func = getattr(DataFrame, name)
except AttributeError as e:
# PySpark version is too old
def func(self, *args, **kwargs):
raise e
return func
wraps = getattr(functools, "wraps", lambda _: lambda f: f) # py3.4+
@wraps(func)
def _wrapper(self, *args, **kwargs):
dataframe = func(self, *args, **kwargs)
if self.__class__ != SourcedDataFrame \
and isinstance(self, SourcedDataFrame) \
and isinstance(dataframe, DataFrame):
return self.__class__(dataframe._jdf, self._session, self._implicits)
return dataframe
return _wrapper | Wraps the DataFrame's original method by name to return the derived class instance. |
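The same wrap-and-rewrap idea in miniature, assuming a plain base class in place of PySpark's DataFrame; the point is that results of inherited methods get re-wrapped in the caller's subclass so call chains stay typed:

import functools

class Base:
    def clone(self):
        return Base()

def generate_method(name):
    func = getattr(Base, name)
    @functools.wraps(func)
    def _wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        # Re-wrap plain Base results in the caller's subclass.
        if type(self) is not Base and isinstance(result, Base):
            return self.__class__()
        return result
    return _wrapper

class Derived(Base):
    clone = generate_method('clone')

print(type(Derived().clone()).__name__)  # Derived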
def serial_udb_extra_f8_encode(self, sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH):
'''
Backwards compatible version of SERIAL_UDB_EXTRA F8: format
sue_HEIGHT_TARGET_MAX : Serial UDB Extra HEIGHT_TARGET_MAX (float)
sue_HEIGHT_TARGET_MIN : Serial UDB Extra HEIGHT_TARGET_MIN (float)
sue_ALT_HOLD_THROTTLE_MIN : Serial UDB Extra ALT_HOLD_THROTTLE_MIN (float)
sue_ALT_HOLD_THROTTLE_MAX : Serial UDB Extra ALT_HOLD_THROTTLE_MAX (float)
sue_ALT_HOLD_PITCH_MIN : Serial UDB Extra ALT_HOLD_PITCH_MIN (float)
sue_ALT_HOLD_PITCH_MAX : Serial UDB Extra ALT_HOLD_PITCH_MAX (float)
sue_ALT_HOLD_PITCH_HIGH : Serial UDB Extra ALT_HOLD_PITCH_HIGH (float)
'''
return MAVLink_serial_udb_extra_f8_message(sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH) | Backwards compatible version of SERIAL_UDB_EXTRA F8: format
sue_HEIGHT_TARGET_MAX : Serial UDB Extra HEIGHT_TARGET_MAX (float)
sue_HEIGHT_TARGET_MIN : Serial UDB Extra HEIGHT_TARGET_MIN (float)
sue_ALT_HOLD_THROTTLE_MIN : Serial UDB Extra ALT_HOLD_THROTTLE_MIN (float)
sue_ALT_HOLD_THROTTLE_MAX : Serial UDB Extra ALT_HOLD_THROTTLE_MAX (float)
sue_ALT_HOLD_PITCH_MIN : Serial UDB Extra ALT_HOLD_PITCH_MIN (float)
sue_ALT_HOLD_PITCH_MAX : Serial UDB Extra ALT_HOLD_PITCH_MAX (float)
sue_ALT_HOLD_PITCH_HIGH : Serial UDB Extra ALT_HOLD_PITCH_HIGH (float) |
def assemble_binary_rules(self, main, jar, custom_rules=None):
"""Creates an ordered list of rules suitable for fully shading the given binary.
The default rules will ensure the `main` class name is un-changed along with a minimal set of
support classes but that everything else will be shaded.
Any `custom_rules` are given highest precedence and so they can interfere with this automatic
binary shading. In general it's safe to add exclusion rules to open up classes that need to be
shared between the binary and the code it runs over. An example would be excluding the
`org.junit.Test` annotation class from shading since a tool running junit needs to be able
to scan for this annotation inside the user code it tests.
:param unicode main: The main class to preserve as the entry point.
:param unicode jar: The path of the binary jar the `main` class lives in.
:param list custom_rules: An optional list of custom `Shader.Rule`s.
:returns: a precedence-ordered list of `Shader.Rule`s
"""
# If a class is matched by multiple rules, the 1st lexical match wins (see:
# https://code.google.com/p/jarjar/wiki/CommandLineDocs#Rules_file_format).
# As such we 1st ensure the `main` package and the jre packages have exclusion rules and
# then apply a final set of shading rules to everything else at lowest precedence.
# Custom rules take precedence.
rules = list(custom_rules or [])
# Exclude the main entrypoint's package from shading. There may be package-private classes that
# the main class accesses, so we must preserve the whole package.
parts = main.rsplit('.', 1)
if len(parts) == 2:
main_package = parts[0]
else:
# There is no package component, so the main class is in the root (default) package.
main_package = None
rules.append(self.exclude_package(main_package))
rules.extend(self.exclude_package(system_pkg, recursive=True)
for system_pkg in self._binary_package_excludes)
# Shade everything else.
#
# NB: A simpler way to do this jumps out - just emit 1 wildcard rule:
#
# rule **.* _shaded_.@1.@2
#
# Unfortunately, as of jarjar 1.4 this wildcard catch-all technique improperly transforms
# resources in the `main_package`. The jarjar binary jar itself has its command line help text
# stored as a resource in its main's package and so using a catch-all like this causes
# recursively shading jarjar with itself using this class to fail!
#
# As a result we explicitly shade all the non `main_package` packages in the binary jar instead
# which does support recursively shading jarjar.
rules.extend(self.shade_package(pkg) for pkg in sorted(self._iter_jar_packages(jar))
if pkg != main_package)
return rules | Creates an ordered list of rules suitable for fully shading the given binary.
The default rules will ensure the `main` class name is un-changed along with a minimal set of
support classes but that everything else will be shaded.
Any `custom_rules` are given highest precedence and so they can interfere with this automatic
binary shading. In general it's safe to add exclusion rules to open up classes that need to be
shared between the binary and the code it runs over. An example would be excluding the
`org.junit.Test` annotation class from shading since a tool running junit needs to be able
to scan for this annotation inside the user code it tests.
:param unicode main: The main class to preserve as the entry point.
:param unicode jar: The path of the binary jar the `main` class lives in.
:param list custom_rules: An optional list of custom `Shader.Rule`s.
:returns: a precedence-ordered list of `Shader.Rule`s |
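The main-package derivation on its own; a class in the root (default) package yields None:

for main in ('org.example.tool.Main', 'Main'):
    parts = main.rsplit('.', 1)
    main_package = parts[0] if len(parts) == 2 else None
    print(main, '->', main_package)
# org.example.tool.Main -> org.example.tool
# Main -> None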
def intermediate_cpfs(self) -> List[CPF]:
'''Returns list of intermediate-fluent CPFs in level order.'''
_, cpfs = self.cpfs
interm_cpfs = [cpf for cpf in cpfs if cpf.name in self.intermediate_fluents]
interm_cpfs = sorted(interm_cpfs, key=lambda cpf: (self.intermediate_fluents[cpf.name].level, cpf.name))
return interm_cpfs | Returns list of intermediate-fluent CPFs in level order. |
def getsuffix(subject):
"""
Returns the suffix of a filename. If the file has no suffix, returns None.
Can return an empty string if the filename ends with a period.
"""
index = subject.rfind('.')
if index > subject.replace('\\', '/').rfind('/'):
return subject[index+1:]
return None | Returns the suffix of a filename. If the file has no suffix, returns None.
Can return an empty string if the filename ends with a period. |
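A few illustrative calls, assuming getsuffix above is in scope; the backslash replacement makes the separator check work for both Windows and POSIX paths:

print(getsuffix('archive.tar.gz'))   # 'gz'
print(getsuffix('notes.'))           # '' (trailing period)
print(getsuffix('README'))           # None
print(getsuffix('C:\\dir.d\\file'))  # None (the last '.' belongs to a directory)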
def _parse_type(self, element, types):
"""Parse a 'complexType' element.
@param element: The top-level complexType element
@param types: A map of the elements of all available complexType's.
@return: The schema for the complexType.
"""
name = element.attrib["name"]
type = element.attrib["type"]
if not type.startswith("tns:"):
raise RuntimeError("Unexpected element type %s" % type)
type = type[4:]
[children] = types[type][0]
types[type][1] = True
self._remove_namespace_from_tag(children)
if children.tag not in ("sequence", "choice"):
raise RuntimeError("Unexpected children type %s" % children.tag)
if children[0].attrib["name"] == "item":
schema = SequenceSchema(name)
else:
schema = NodeSchema(name)
for child in children:
self._remove_namespace_from_tag(child)
if child.tag == "element":
name, type, min_occurs, max_occurs = self._parse_child(child)
if type in self.leaf_types:
if max_occurs != 1:
raise RuntimeError("Unexpected max value for leaf")
if not isinstance(schema, NodeSchema):
raise RuntimeError("Attempt to add leaf to a non-node")
schema.add(LeafSchema(name), min_occurs=min_occurs)
else:
if name == "item": # sequence
if not isinstance(schema, SequenceSchema):
raise RuntimeError("Attempt to set child for "
"non-sequence")
schema.set(self._parse_type(child, types),
min_occurs=min_occurs,
max_occurs=max_occurs)
else:
if max_occurs != 1:
raise RuntimeError("Unexpected max for node")
if not isinstance(schema, NodeSchema):
raise RuntimeError("Unexpected schema type")
schema.add(self._parse_type(child, types),
min_occurs=min_occurs)
elif child.tag == "choice":
pass
else:
raise RuntimeError("Unexpected child type")
return schema | Parse a 'complexType' element.
@param element: The top-level complexType element
@param types: A map of the elements of all available complexType's.
@return: The schema for the complexType. |
def partial_distance_covariance(x, y, z):
"""
Partial distance covariance estimator.
Compute the estimator for the partial distance covariance of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance covariance
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance covariance.
See Also
--------
partial_distance_correlation
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> c = np.array([[1, 3, 4],
... [5, 7, 8],
... [9, 11, 15],
... [13, 15, 16]])
>>> dcor.partial_distance_covariance(a, a, c) # doctest: +ELLIPSIS
0.0024298...
>>> dcor.partial_distance_covariance(a, b, c)
0.0347030...
>>> dcor.partial_distance_covariance(b, b, c)
0.4956241...
"""
a = _u_distance_matrix(x)
b = _u_distance_matrix(y)
c = _u_distance_matrix(z)
proj = u_complementary_projection(c)
return u_product(proj(a), proj(b)) | Partial distance covariance estimator.
Compute the estimator for the partial distance covariance of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance covariance
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance covariance.
See Also
--------
partial_distance_correlation
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> c = np.array([[1, 3, 4],
... [5, 7, 8],
... [9, 11, 15],
... [13, 15, 16]])
>>> dcor.partial_distance_covariance(a, a, c) # doctest: +ELLIPSIS
0.0024298...
>>> dcor.partial_distance_covariance(a, b, c)
0.0347030...
>>> dcor.partial_distance_covariance(b, b, c)
0.4956241... |
def hash_file(path, block_size=65536):
"""Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block
"""
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest() | Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block |
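A self-contained usage sketch: write a small temporary file and hash it with the same block-wise iter(f.read, b'') pattern used above:

import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')
    path = tmp.name

sha256 = hashlib.sha256()
with open(path, 'rb') as f:
    for block in iter(lambda: f.read(65536), b''):
        sha256.update(block)
print(sha256.hexdigest())
# b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9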
def get_delete_api(self, resource):
""" Generates the meta descriptor for the resource item api. """
parameters = self.delete_item_parameters(resource)
get_item_api = {
'path': '/%s/{id}/' % resource.get_api_name(),
'description': 'Operations on %s' % resource.model.__name__,
"responseClass": "void",
'operations': [
{
'httpMethod': 'DELETE',
'nickname': 'delete%s' % resource.model.__name__,
'summary': 'Delete %s by its unique ID' %
resource.model.__name__,
'parameters': parameters,
}
]
}
return get_item_api | Generates the meta descriptor for the resource item api. |
def exec_command_on_nodes(nodes, cmd, label, conn_params=None):
"""Execute a command on a node (id or hostname) or on a set of nodes.
:param nodes: list of targets of the command cmd. Each must be an
execo.Host.
:param cmd: string representing the command to run on the
remote nodes.
:param label: string for debugging purpose.
:param conn_params: connection parameters passed to the execo.Remote
function
"""
if isinstance(nodes, BASESTRING):
nodes = [nodes]
if conn_params is None:
conn_params = DEFAULT_CONN_PARAMS
logger.debug("Running %s on %s ", label, nodes)
remote = ex.get_remote(cmd, nodes, conn_params)
remote.run()
if not remote.finished_ok:
raise Exception('An error occurred during remote execution')
return remote | Execute a command on a node (id or hostname) or on a set of nodes.
:param nodes: list of targets of the command cmd. Each must be an
execo.Host.
:param cmd: string representing the command to run on the
remote nodes.
:param label: string for debugging purpose.
:param conn_params: connection parameters passed to the execo.Remote
function |
def register_types(name, *types):
"""
Register a short name for one or more content types.
"""
type_names.setdefault(name, set())
for t in types:
# Redirecting the type
if t in media_types:
type_names[media_types[t]].discard(t)
# Save the mapping
media_types[t] = name
type_names[name].add(t) | Register a short name for one or more content types. |
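Behavior sketch with fresh module-level registries (assumed to start as empty dicts, which is what the function expects); re-registering a content type moves it to the new short name:

media_types = {}
type_names = {}

def register_types(name, *types):
    type_names.setdefault(name, set())
    for t in types:
        if t in media_types:
            type_names[media_types[t]].discard(t)  # drop the old mapping
        media_types[t] = name
        type_names[name].add(t)

register_types('json', 'application/json')
register_types('js', 'application/json')  # re-point the type to a new name
print(media_types['application/json'])    # 'js'
print(type_names['json'])                 # set()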
def _set_anycast_rp_ip(self, v, load=False):
"""
Setter method for anycast_rp_ip, mapped from YANG variable /routing_system/router/hide_pim_holder/pim/anycast_rp_ip (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast_rp_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast_rp_ip() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("anycast_rp_ip_addr",anycast_rp_ip.anycast_rp_ip, yang_name="anycast-rp-ip", rest_name="anycast-rp-ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='anycast-rp-ip-addr', extensions={u'tailf-common': {u'info': u'Set Anycast RP address and peer address', u'cli-suppress-mode': None, u'hidden': u'full', u'callpoint': u'PimAnycastRpIpCfgCallpoint', u'cli-suppress-list-no': None}}), is_container='list', yang_name="anycast-rp-ip", rest_name="anycast-rp-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Anycast RP address and peer address', u'cli-suppress-mode': None, u'hidden': u'full', u'callpoint': u'PimAnycastRpIpCfgCallpoint', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """anycast_rp_ip must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("anycast_rp_ip_addr",anycast_rp_ip.anycast_rp_ip, yang_name="anycast-rp-ip", rest_name="anycast-rp-ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='anycast-rp-ip-addr', extensions={u'tailf-common': {u'info': u'Set Anycast RP address and peer address', u'cli-suppress-mode': None, u'hidden': u'full', u'callpoint': u'PimAnycastRpIpCfgCallpoint', u'cli-suppress-list-no': None}}), is_container='list', yang_name="anycast-rp-ip", rest_name="anycast-rp-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Anycast RP address and peer address', u'cli-suppress-mode': None, u'hidden': u'full', u'callpoint': u'PimAnycastRpIpCfgCallpoint', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='list', is_config=True)""",
})
self.__anycast_rp_ip = t
if hasattr(self, '_set'):
self._set() | Setter method for anycast_rp_ip, mapped from YANG variable /routing_system/router/hide_pim_holder/pim/anycast_rp_ip (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_anycast_rp_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_anycast_rp_ip() directly. |
def register_instances(name, instances, region=None, key=None, keyid=None,
profile=None):
'''
Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from
the ``instances`` list does not remove it from the ELB.
name
The name of the Elastic Load Balancer to add EC2 instances to.
instances
A list of EC2 instance IDs that this Elastic Load Balancer should
distribute traffic to. This state will only ever append new instances
to the ELB. EC2 instances already associated with this ELB will not be
removed if they are not in the ``instances`` list.
.. versionadded:: 2015.8.0
.. code-block:: yaml
add-instances:
boto_elb.register_instances:
- name: myloadbalancer
- instances:
- instance-id1
- instance-id2
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
lb = __salt__['boto_elb.exists'](name, region, key, keyid, profile)
if not lb:
msg = 'Could not find lb {0}'.format(name)
log.error(msg)
ret.update({'comment': msg, 'result': False})
return ret
health = __salt__['boto_elb.get_instance_health'](
name, region, key, keyid, profile)
nodes = [value['instance_id'] for value in health
if value['description'] != 'Instance deregistration currently in progress.']
new = [value for value in instances if value not in nodes]
if not new:
msg = 'Instance(s) {0} already exist.'.format(six.text_type(instances).strip('[]'))
log.debug(msg)
ret.update({'comment': msg})
return ret
if __opts__['test']:
ret['comment'] = 'ELB {0} is set to register : {1}.'.format(name, new)
ret['result'] = None
return ret
state = __salt__['boto_elb.register_instances'](
name, instances, region, key, keyid, profile)
if state:
msg = 'Load Balancer {0} has been changed'.format(name)
log.info(msg)
new = set().union(nodes, instances)
ret.update({'comment': msg, 'changes': {'old': '\n'.join(nodes),
'new': '\n'.join(list(new))}})
else:
msg = 'Load balancer {0} failed to add instances'.format(name)
log.error(msg)
ret.update({'comment': msg, 'result': False})
return ret | Add EC2 instance(s) to an Elastic Load Balancer. Removing an instance from
the ``instances`` list does not remove it from the ELB.
name
The name of the Elastic Load Balancer to add EC2 instances to.
instances
A list of EC2 instance IDs that this Elastic Load Balancer should
distribute traffic to. This state will only ever append new instances
to the ELB. EC2 instances already associated with this ELB will not be
removed if they are not in the ``instances`` list.
.. versionadded:: 2015.8.0
.. code-block:: yaml
add-instances:
boto_elb.register_instances:
- name: myloadbalancer
- instances:
- instance-id1
- instance-id2 |
def await_any_transforms_exist(cli, transform_paths, does_exist=DEFAULT_TRANSFORM_EXISTS, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
"""
Waits for a transform to exist based on does_exist.
:param cli:
:param transform_paths: An array of transform paths [...]
:param does_exist: Whether or not to await for exist state (True | False)
:param timeout_seconds: How long until this returns with failure
:return: bool
"""
message_payload = {
"transform_paths": transform_paths,
"do_exist": does_exist,
"match_mode": "Any",
"timeout": timeout_seconds
}
msg = message.Message("await.unity.transform.exists", message_payload)
cli.send_message(msg)
response = cli.read_message()
verify_response(response)
return bool(response['payload']['success']) | Waits for a transform to exist based on does_exist.
:param cli:
:param transform_paths: An array of transform paths [...]
:param does_exist: Whether or not to await for exist state (True | False)
:param timeout_seconds: How long until this returns with failure
:return: bool |
def set_cte(self, cte_id, sql):
"""This is the equivalent of what self.extra_ctes[cte_id] = sql would
do if extra_ctes were an OrderedDict
"""
for cte in self.extra_ctes:
if cte['id'] == cte_id:
cte['sql'] = sql
break
else:
self.extra_ctes.append(
{'id': cte_id, 'sql': sql}
) | This is the equivalent of what self.extra_ctes[cte_id] = sql would
do if extra_ctes were an OrderedDict |
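The upsert-by-id pattern in isolation, assuming entries shaped like {'id': ..., 'sql': ...}; the for/else only appends when no existing id matched:

extra_ctes = [{'id': 'a', 'sql': 'select 1'}]

def set_cte(cte_id, sql):
    for cte in extra_ctes:
        if cte['id'] == cte_id:
            cte['sql'] = sql
            break
    else:
        extra_ctes.append({'id': cte_id, 'sql': sql})

set_cte('a', 'select 2')  # updates in place
set_cte('b', 'select 3')  # appends, preserving insertion order
print(extra_ctes)
# [{'id': 'a', 'sql': 'select 2'}, {'id': 'b', 'sql': 'select 3'}]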
def _search(self, searchterm, pred, **args):
"""
Search for things using labels
"""
# TODO: DRY with sparql_ontol_utils
searchterm = searchterm.replace('%','.*')
namedGraph = get_named_graph(self.handle)
query = """
prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>
SELECT ?c WHERE {{
GRAPH <{g}> {{
?c {pred} ?l
FILTER regex(?l,'{s}','i')
}}
}}
""".format(pred=pred, s=searchterm, g=namedGraph)
bindings = run_sparql(query)
return [r['c']['value'] for r in bindings] | Search for things using labels |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FieldTypeContext for this FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeContext
"""
if self._context is None:
self._context = FieldTypeContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FieldTypeContext for this FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeContext |
def run(self, module, options):
"""
Run the operator.
:param module: The target module path.
:type module: ``str``
:param options: Any runtime options.
:type options: ``dict``
:return: The operator results.
:rtype: ``dict``
"""
logger.debug("Running halstead harvester")
results = {}
for filename, details in dict(self.harvester.results).items():
results[filename] = {}
for instance in details:
if isinstance(instance, list):
for item in instance:
function, report = item
results[filename][function] = self._report_to_dict(report)
else:
if isinstance(instance, str) and instance == "error":
logger.warning(
f"Failed to run Halstead harvester on {filename} : {details['error']}"
)
continue
results[filename] = self._report_to_dict(instance)
return results | Run the operator.
:param module: The target module path.
:type module: ``str``
:param options: Any runtime options.
:type options: ``dict``
:return: The operator results.
:rtype: ``dict`` |
def create_osd(
conn,
cluster,
data,
journal,
zap,
fs_type,
dmcrypt,
dmcrypt_dir,
storetype,
block_wal,
block_db,
**kw):
"""
Run on osd node, creates an OSD from a data disk.
"""
ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
args = [
ceph_volume_executable,
'--cluster', cluster,
'lvm',
'create',
'--%s' % storetype,
'--data', data
]
if zap:
LOG.warning('zapping is no longer supported when preparing')
if dmcrypt:
args.append('--dmcrypt')
# TODO: re-enable dmcrypt support once ceph-volume grows it
LOG.warning('dmcrypt is currently not supported')
if storetype == 'bluestore':
if block_wal:
args.append('--block.wal')
args.append(block_wal)
if block_db:
args.append('--block.db')
args.append(block_db)
elif storetype == 'filestore':
if not journal:
raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
args.append('--journal')
args.append(journal)
if kw.get('debug'):
remoto.process.run(
conn,
args,
extend_env={'CEPH_VOLUME_DEBUG': '1'}
)
else:
remoto.process.run(
conn,
args
) | Run on osd node, creates an OSD from a data disk. |
def drop_namespaces(self):
"""Drop all namespaces."""
self.session.query(NamespaceEntry).delete()
self.session.query(Namespace).delete()
self.session.commit() | Drop all namespaces. |
def likelihood(self, x, cl):
"""
X = numpy.random.random([2,3,4])
# we have 2x3 data with a feature vector of 4 features
Use likelihoodFromImage() function for 3d image input
m.likelihood(X,0)
"""
# sha = x.shape
# xr = x.reshape(-1, sha[-1])
# outsha = sha[:-1]
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
logger.debug("likel " + str(x.shape))
if self.modelparams["type"] == "gmmsame":
px = self.mdl[cl].score_samples(x)
# TODO: handle multi-dimensional feature vectors
# px = px.reshape(outsha)
elif self.modelparams["type"] == "kernel":
px = self.mdl[cl].score_samples(x)
elif self.modelparams["type"] == "gaussian_kde":
# print x
# np.log because it is likelihood
# @TODO there is probably a problem with the reshape here
# old
# px = np.log(self.mdl[cl](x.reshape(-1)))
# new
px = np.log(self.mdl[cl](x))
# px = px.reshape(outsha)
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
elif self.modelparams["type"] == "dpgmm":
# todo here is a hack
# for some reason dpgmm does not work on our data
# it always trains one component close to zero
# this probably has something to do with the alpha parameter
# multiplying the data by a small number magically makes it work
logger.warning(".score() replaced with .score_samples() . Check it.")
# px = self.mdl[cl].score(x * 0.01)
px = self.mdl[cl].score_samples(x * 0.01)
elif self.modelparams["type"] == "stored":
px = self.mdl[cl].score(x)
return px | X = numpy.random.random([2,3,4])
# we have 2x3 data with a feature vector of 4 features
Use likelihoodFromImage() function for 3d image input
m.likelihood(X,0) |
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = super(ForumView, self).get_context_data(**kwargs)
# Insert the considered forum into the context
context['forum'] = self.get_forum()
# Get the list of forums that have the current forum as parent
context['sub_forums'] = ForumVisibilityContentTree.from_forums(
self.request.forum_permission_handler.forum_list_filter(
context['forum'].get_descendants(), self.request.user,
),
)
# The announces will be displayed on each page of the forum
context['announces'] = list(
self.get_forum()
.topics.select_related('poster', 'last_post', 'last_post__poster')
.filter(type=Topic.TOPIC_ANNOUNCE)
)
# Determines the topics that have not been read by the current user
context['unread_topics'] = TrackingHandler(self.request).get_unread_topics(
list(context[self.context_object_name]) + context['announces'], self.request.user,
)
return context | Returns the context data to provide to the template. |
def start_plasma_store(stdout_file=None,
stderr_file=None,
object_store_memory=None,
plasma_directory=None,
huge_pages=False,
plasma_store_socket_name=None):
"""This method starts an object store process.
Args:
stdout_file: A file handle opened for writing to redirect stdout
to. If no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr
to. If no redirection should happen, then this should be None.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
Returns:
ProcessInfo for the process that was started.
"""
object_store_memory, plasma_directory = determine_plasma_store_config(
object_store_memory, plasma_directory, huge_pages)
if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES:
raise ValueError("Attempting to cap object store memory usage at {} "
"bytes, but the minimum allowed is {} bytes.".format(
object_store_memory,
ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES))
# Print the object store memory using two decimal places.
object_store_memory_str = (object_store_memory / 10**7) / 10**2
logger.info("Starting the Plasma object store with {} GB memory "
"using {}.".format(
round(object_store_memory_str, 2), plasma_directory))
# Start the Plasma store.
process_info = _start_plasma_store(
object_store_memory,
use_profiler=RUN_PLASMA_STORE_PROFILER,
stdout_file=stdout_file,
stderr_file=stderr_file,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
socket_name=plasma_store_socket_name)
return process_info | This method starts an object store process.
Args:
stdout_file: A file handle opened for writing to redirect stdout
to. If no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr
to. If no redirection should happen, then this should be None.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
Returns:
ProcessInfo for the process that was started. |
def d2logpdf_dlink2(self, link_f, y, Y_metadata=None):
"""
Hessian at y, given link(f), w.r.t link(f)
i.e. second derivative logpdf at y given link(f_i) and link(f_j) w.r.t link(f_i) and link(f_j)
The hessian will be 0 unless i == j
.. math::
\\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}\\lambda(f)} = -\\beta^{2}\\frac{d\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\
\\alpha_{i} = \\beta y_{i}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in gamma distribution
:returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f)
:rtype: Nx1 array
.. Note::
Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
(the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
"""
# hess = (self.beta - 1.) / (y - link_f)**2
c = np.zeros_like(y)
if Y_metadata is not None and 'censored' in Y_metadata.keys():
c = Y_metadata['censored']
# uncensored = (1-c)* (-(y ** self.r) * np.exp(-link_f))
# censored = -c*np.exp(-link_f)*y**self.r
uncensored = (1-c)*(1/link_f**2 -2*y**self.r/link_f**3)
censored = -c*2*y**self.r/link_f**3
hess = uncensored + censored
# hess = -(y ** self.r) * np.exp(-link_f)
return hess | Hessian at y, given link(f), w.r.t link(f)
i.e. second derivative logpdf at y given link(f_i) and link(f_j) w.r.t link(f_i) and link(f_j)
The hessian will be 0 unless i == j
.. math::
\\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}\\lambda(f)} = -\\beta^{2}\\frac{d\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\
\\alpha_{i} = \\beta y_{i}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in gamma distribution
:returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f)
:rtype: Nx1 array
.. Note::
Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
(the distribution for y_i depends only on link(f_i) not on link(f_(j!=i)) |
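A numeric sanity check of the censored/uncensored mixing with NumPy only, assuming r = 2 and a toy link_f (all values here are illustrative, not taken from the library):

import numpy as np

r = 2.0
link_f = np.array([[1.0], [2.0]])
y = np.array([[1.0], [1.0]])
c = np.array([[0.0], [1.0]])  # second observation censored

uncensored = (1 - c) * (1 / link_f**2 - 2 * y**r / link_f**3)
censored = -c * 2 * y**r / link_f**3
print(uncensored + censored)  # [[-1.  ], [-0.25]]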
def _box_click(self, event):
"""Check or uncheck box when clicked."""
x, y, widget = event.x, event.y, event.widget
elem = widget.identify("element", x, y)
if "image" in elem:
# a box was clicked
item = self.identify_row(y)
if self.tag_has("unchecked", item) or self.tag_has("tristate", item):
self._check_ancestor(item)
self._check_descendant(item)
else:
self._uncheck_descendant(item)
self._uncheck_ancestor(item) | Check or uncheck box when clicked. |
def calculate_size(name, permits, timeout):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += INT_SIZE_IN_BYTES
data_size += LONG_SIZE_IN_BYTES
return data_size | Calculates the request payload size |
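With the usual codec sizes assumed (INT_SIZE_IN_BYTES = 4, LONG_SIZE_IN_BYTES = 8, and a string costing a 4-byte length prefix plus its UTF-8 bytes - these constants are assumptions, not taken from the source), the payload for name='sem' comes to 19 bytes:

INT_SIZE_IN_BYTES = 4
LONG_SIZE_IN_BYTES = 8

def calculate_size_str(s):
    # length prefix + encoded bytes (assumed encoding scheme)
    return INT_SIZE_IN_BYTES + len(s.encode('utf-8'))

data_size = calculate_size_str('sem') + INT_SIZE_IN_BYTES + LONG_SIZE_IN_BYTES
print(data_size)  # 19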
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as it to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals) | Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as it to the context without adding the globals.
`locals` can be a dict of local variables for internal usage. |
def read_sample(filename):
"""!
@brief Returns data sample from simple text file.
@details This function should be used for a text file with the following format:
@code
point_1_coord_1 point_1_coord_2 ... point_1_coord_n
point_2_coord_1 point_2_coord_2 ... point_2_coord_n
... ...
@endcode
@param[in] filename (string): Path to file with data.
@return (list) Points where each point is represented by a list of coordinates.
"""
file = open(filename, 'r')
sample = [[float(val) for val in line.split()] for line in file if len(line.strip()) > 0]
file.close()
return sample | !
@brief Returns data sample from simple text file.
@details This function should be used for a text file with the following format:
@code
point_1_coord_1 point_1_coord_2 ... point_1_coord_n
point_2_coord_1 point_2_coord_2 ... point_2_coord_n
... ...
@endcode
@param[in] filename (string): Path to file with data.
@return (list) Points where each point is represented by a list of coordinates. |
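Usage sketch, assuming read_sample above is in scope: write a two-point sample to a temporary file and read it back; values are whitespace-separated and blank lines are skipped:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('1.0 2.0\n\n3.5 4.5\n')
    path = f.name

print(read_sample(path))  # [[1.0, 2.0], [3.5, 4.5]]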
def has_scheduled_methods(cls):
"""Decorator; use this on a class for which some methods have been
decorated with :func:`schedule` or :func:`schedule_hint`. Those methods
are then tagged with the attribute `__member_of__`, so that we may
serialise and retrieve the correct method. This should be considered
a patch to a flaw in the Python object model."""
for member in cls.__dict__.values():
if hasattr(member, '__wrapped__'):
member.__wrapped__.__member_of__ = cls
return cls | Decorator; use this on a class for which some methods have been
decorated with :func:`schedule` or :func:`schedule_hint`. Those methods
are then tagged with the attribute `__member_of__`, so that we may
serialise and retrieve the correct method. This should be considered
a patch to a flaw in the Python object model. |
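A minimal reproduction of the tagging, assuming a decorator that exposes the original function via __wrapped__ (functools.wraps does this automatically):

import functools

def schedule(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

def has_scheduled_methods(cls):
    for member in cls.__dict__.values():
        if hasattr(member, '__wrapped__'):
            member.__wrapped__.__member_of__ = cls
    return cls

@has_scheduled_methods
class Worker:
    @schedule
    def job(self):
        return 42

print(Worker.job.__wrapped__.__member_of__.__name__)  # Worker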
def tune(device, **kwargs):
'''
Set attributes for the specified device
CLI Example:
.. code-block:: bash
salt '*' disk.tune /dev/sda1 read-ahead=1024 read-write=True
Valid options are: ``read-ahead``, ``filesystem-read-ahead``,
``read-only``, ``read-write``.
See the ``blockdev(8)`` manpage for a more complete description of these
options.
'''
kwarg_map = {'read-ahead': 'setra',
'filesystem-read-ahead': 'setfra',
'read-only': 'setro',
'read-write': 'setrw'}
opts = ''
args = []
for key in kwargs:
if key in kwarg_map:
switch = kwarg_map[key]
if key != 'read-write':
args.append(switch.replace('set', 'get'))
else:
args.append('getro')
if kwargs[key] == 'True' or kwargs[key] is True:
opts += '--{0} '.format(key)
else:
opts += '--{0} {1} '.format(switch, kwargs[key])
cmd = 'blockdev {0}{1}'.format(opts, device)
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
return dump(device, args) | Set attributes for the specified device
CLI Example:
.. code-block:: bash
salt '*' disk.tune /dev/sda1 read-ahead=1024 read-write=True
Valid options are: ``read-ahead``, ``filesystem-read-ahead``,
``read-only``, ``read-write``.
See the ``blockdev(8)`` manpage for a more complete description of these
options. |
def to_aws_format(tags):
"""Convert the Ray node name tag to the AWS-specific 'Name' tag."""
if TAG_RAY_NODE_NAME in tags:
tags["Name"] = tags[TAG_RAY_NODE_NAME]
del tags[TAG_RAY_NODE_NAME]
return tags | Convert the Ray node name tag to the AWS-specific 'Name' tag. |
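Minimal demonstration, assuming TAG_RAY_NODE_NAME = 'ray-node-name' (an assumed value; the real constant lives in Ray's tag module):

TAG_RAY_NODE_NAME = 'ray-node-name'

tags = {'ray-node-name': 'head', 'env': 'prod'}
if TAG_RAY_NODE_NAME in tags:
    tags['Name'] = tags[TAG_RAY_NODE_NAME]
    del tags[TAG_RAY_NODE_NAME]
print(tags)  # {'env': 'prod', 'Name': 'head'}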
def connect(self):
"""
Sets up your Phabricator session, it's not necessary to call
this directly
"""
if self.token:
self.phab_session = {'token': self.token}
return
req = self.req_session.post('%s/api/conduit.connect' % self.host, data={
'params': json.dumps(self.connect_params),
'output': 'json',
'__conduit__': True,
})
# Parse out the response (error handling omitted)
result = req.json()['result']
self.phab_session = {
'sessionKey': result['sessionKey'],
'connectionID': result['connectionID'],
} | Sets up your Phabricator session, it's not necessary to call
this directly |
def get_token(request):
"""
Returns the token model instance associated with the given request token key.
If no user is retrieved AnonymousToken is returned.
"""
if (not request.META.get(header_name_to_django(auth_token_settings.HEADER_NAME)) and
config.CHAMBER_MULTIDOMAINS_OVERTAKER_AUTH_COOKIE_NAME):
overtaker_auth_token = request.COOKIES.get(config.CHAMBER_MULTIDOMAINS_OVERTAKER_AUTH_COOKIE_NAME)
token = get_object_or_none(Token, key=overtaker_auth_token, is_active=True)
if utils.get_user_from_token(token).is_authenticated():
return token
return utils.get_token(request) | Returns the token model instance associated with the given request token key.
If no user is retrieved AnonymousToken is returned. |
def size(self):
""" Returns the number of connections cached by the pool. """
return sum(q.qsize() for q in self._connections.values()) + len(self._fairies) | Returns the number of connections cached by the pool. |
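Standalone illustration with queue.Queue, assuming _connections maps keys to queues of idle connections and _fairies tracks connections currently checked out:

import queue

class Pool:
    def __init__(self):
        self._connections = {}
        self._fairies = []

    def size(self):
        return sum(q.qsize() for q in self._connections.values()) + len(self._fairies)

pool = Pool()
pool._connections['db'] = queue.Queue()
pool._connections['db'].put(object())   # one idle connection
pool._fairies.append(object())          # one checked-out connection
print(pool.size())  # 2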
def _parse_broadcast(self, msg):
"""
Given a broadcast message, returns the message that was broadcast.
"""
# get message, remove surrounding quotes, and unescape
return self._unescape(self._get_type(msg[self.broadcast_prefix_len:])) | Given a broadcast message, returns the message that was broadcast.
def get_random_submission(self, subreddit='all'):
"""Return a random Submission object.
:param subreddit: Limit the submission to the specified
subreddit(s). Default: all
"""
url = self.config['subreddit_random'].format(
subreddit=six.text_type(subreddit))
try:
item = self.request_json(url,
params={'unique': self._unique_count})
self._unique_count += 1 # Avoid network-level caching
return objects.Submission.from_json(item)
except errors.RedirectException as exc:
self._unique_count += 1
return self.get_submission(exc.response_url)
raise errors.ClientException('Expected exception not raised.') | Return a random Submission object.
:param subreddit: Limit the submission to the specified
subreddit(s). Default: all |
def send_produce_request(self, payloads=None, acks=1,
timeout=DEFAULT_REPLICAS_ACK_MSECS,
fail_on_error=True, callback=None):
"""
Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Parameters
----------
payloads:
list of ProduceRequest
acks:
How many Kafka broker replicas need to write before
the leader replies with a response
timeout:
How long the server has to receive the acks from the
replicas before returning an error.
fail_on_error:
boolean, should we raise an Exception if we encounter an API error?
callback:
function, instead of returning the ProduceResponse,
first pass it through this function
Return
------
a deferred which callbacks with a list of ProduceResponse
Raises
------
FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError
"""
encoder = partial(
KafkaCodec.encode_produce_request,
acks=acks,
timeout=timeout)
if acks == 0:
decoder = None
else:
decoder = KafkaCodec.decode_produce_response
resps = yield self._send_broker_aware_request(
payloads, encoder, decoder)
returnValue(self._handle_responses(resps, fail_on_error, callback)) | Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Parameters
----------
payloads:
list of ProduceRequest
acks:
How many Kafka broker replicas need to write before
the leader replies with a response
timeout:
How long the server has to receive the acks from the
replicas before returning an error.
fail_on_error:
boolean, should we raise an Exception if we encounter an API error?
callback:
function, instead of returning the ProduceResponse,
first pass it through this function
Return
------
a deferred which callbacks with a list of ProduceResponse
Raises
------
FailedPayloadsError, LeaderUnavailableError, PartitionUnavailableError |
def str_repl(self, inputstring, **kwargs):
"""Add back strings."""
out = []
comment = None
string = None
for i, c in enumerate(append_it(inputstring, None)):
try:
if comment is not None:
if c is not None and c in nums:
comment += c
elif c == unwrapper and comment:
ref = self.get_ref("comment", comment)
if out and not out[-1].endswith("\n"):
out[-1] = out[-1].rstrip(" ")
if not self.minify:
out[-1] += " " # put two spaces before comment
out.append("#" + ref)
comment = None
else:
raise CoconutInternalException("invalid comment marker in", getline(i, inputstring))
elif string is not None:
if c is not None and c in nums:
string += c
elif c == unwrapper and string:
text, strchar = self.get_ref("str", string)
out.append(strchar + text + strchar)
string = None
else:
raise CoconutInternalException("invalid string marker in", getline(i, inputstring))
elif c is not None:
if c == "#":
comment = ""
elif c == strwrapper:
string = ""
else:
out.append(c)
except CoconutInternalException as err:
complain(err)
if comment is not None:
out.append(comment)
comment = None
if string is not None:
out.append(string)
string = None
out.append(c)
return "".join(out) | Add back strings. |
def mklink():
"""
Like cmd.exe's mklink except it will infer directory status of the
target.
"""
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] link target")
parser.add_option(
'-d', '--directory',
help="Target is a directory (only necessary if not present)",
action="store_true")
options, args = parser.parse_args()
try:
link, target = args
except ValueError:
parser.error("incorrect number of arguments")
symlink(target, link, options.directory)
sys.stdout.write("Symbolic link created: %(link)s --> %(target)s\n" % vars()) | Like cmd.exe's mklink except it will infer directory status of the
target. |
def _query_entities(self, table_name, filter=None, select=None, max_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None):
'''
Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int top:
The maximum number of entities to return.
:param marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list of :class:`~azure.storage.table.models.Entity`
'''
_validate_not_none('table_name', table_name)
_validate_not_none('accept', accept)
next_partition_key = None if marker is None else marker.get('nextpartitionkey')
next_row_key = None if marker is None else marker.get('nextrowkey')
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _to_str(table_name) + '()'
request.headers = [('Accept', _to_str(accept))]
request.query = [
('$filter', _to_str(filter)),
('$select', _to_str(select)),
('$top', _int_to_str(max_results)),
('NextPartitionKey', _to_str(next_partition_key)),
('NextRowKey', _to_str(next_row_key)),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_json_response_to_entities(response, property_resolver) | Returns a list of entities under the specified table. Makes a single list
request to the service. Used internally by the query_entities method.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int top:
The maximum number of entities to return.
:param marker:
A dictionary which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of entities, potentially with a next_marker property.
:rtype: list of :class:`~azure.storage.table.models.Entity` |
def get_src_address_from_data(self, decoded=True):
"""
Return the SRC address of a message if SRC_ADDRESS label is present in message type of the message
Return None otherwise
:param decoded:
:return:
"""
src_address_label = next((lbl for lbl in self.message_type if lbl.field_type
and lbl.field_type.function == FieldType.Function.SRC_ADDRESS), None)
if src_address_label:
start, end = self.get_label_range(src_address_label, view=1, decode=decoded)
if decoded:
src_address = self.decoded_hex_str[start:end]
else:
src_address = self.plain_hex_str[start:end]
else:
src_address = None
return src_address | Return the SRC address of a message if SRC_ADDRESS label is present in message type of the message
Return None otherwise
:param decoded:
:return: |
async def get_suggested_entities(self, get_suggested_entities_request):
"""Return suggested contacts."""
response = hangouts_pb2.GetSuggestedEntitiesResponse()
await self._pb_request('contacts/getsuggestedentities',
get_suggested_entities_request, response)
return response | Return suggested contacts. |