code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def _set_widths(self, row, proc_group):
    """Update auto-width Fields based on `row`.

    Parameters
    ----------
    row : dict
        A row of values, keyed by column name.
    proc_group : {'default', 'override'}
        Whether to consider 'default' or 'override' key for pre- and
        post-format processors.

    Returns
    -------
    True if any widths required adjustment.
    """
    # Free width = total styled width minus what the fields and the
    # separators already consume.
    # NOTE(review): `width_separtor` is misspelled but must match the
    # attribute name defined elsewhere in this class -- do not rename
    # in this method alone.
    width_free = self.style["width_"] - sum(
        [sum(self.fields[c].width for c in self.columns),
         self.width_separtor])
    if width_free < 0:
        # Over budget already; the fixed (non-auto) columns alone must
        # be the culprit, otherwise the width bookkeeping is broken.
        width_fixed = sum(
            [sum(self.fields[c].width for c in self.columns
                 if c not in self.autowidth_columns),
             self.width_separtor])
        assert width_fixed > self.style["width_"], "bug in width logic"
        raise elements.StyleError(
            "Fixed widths specified in style exceed total width")
    elif width_free == 0:
        lgr.debug("Not checking widths; no free width left")
        return False
    lgr.debug("Checking width for row %r", row)
    adjusted = False
    for column in sorted(self.columns, key=lambda c: self.fields[c].width):
        # ^ Sorting the columns by increasing widths isn't necessary; we do
        # it so that columns that already take up more of the screen don't
        # continue to grow and use up free width before smaller columns
        # have a chance to claim some.
        if width_free < 1:
            lgr.debug("Giving up on checking widths; no free width left")
            break
        if column in self.autowidth_columns:
            field = self.fields[column]
            lgr.debug("Checking width of column %r "
                      "(field width: %d, free width: %d)",
                      column, field.width, width_free)
            # If we've added any style transform functions as
            # pre-format processors, we want to measure the width
            # of their result rather than the raw value.
            if field.pre[proc_group]:
                value = field(row[column], keys=[proc_group],
                              exclude_post=True)
            else:
                value = row[column]
            value = six.text_type(value)
            value_width = len(value)
            wmax = self.autowidth_columns[column]["max"]
            if value_width > field.width:
                width_old = field.width
                # A column may grow up to its configured max (when set)
                # and never beyond the width that is currently free.
                width_available = width_free + field.width
                width_new = min(value_width,
                                wmax or width_available,
                                width_available)
                if width_new > width_old:
                    adjusted = True
                    field.width = width_new
                    lgr.debug("Adjusting width of %r column from %d to %d "
                              "to accommodate value %r",
                              column, width_old, field.width, value)
                    # Keep the truncater in sync with the new field width.
                    self._truncaters[column].length = field.width
                # No-op (delta is 0) when the width was not adjusted.
                width_free -= field.width - width_old
            lgr.debug("Free width is %d after processing column %r",
                      width_free, column)
    return adjusted
def _handle_nodes(nodes: MaybeNodeList) -> List[BaseEntity]:
    """Normalize a node or collection of nodes into a list of entities."""
    # A single entity gets wrapped in a one-element list.
    if isinstance(nodes, BaseEntity):
        return [nodes]
    entities = []
    for node in nodes:
        if isinstance(node, BaseEntity):
            entities.append(node)
        else:
            # Convert dict-like parse results into DSL entities.
            entities.append(parse_result_to_dsl(node))
    return entities
def set_vf0(self, vf):
    """set value for self.vf0 and dae.y[self.vf]"""
    # Mirror the value into the DAE algebraic variable array first,
    # then record it as the initial field voltage.
    self.system.dae.y[self.vf] = matrix(vf)
    self.vf0 = vf
def sharded_cluster_link(rel, cluster_id=None,
                         shard_id=None, router_id=None, self_rel=False):
    """Helper for getting a ShardedCluster link document, given a rel.

    :param rel: link relation name, a key of ``_SHARDED_CLUSTER_LINKS``.
    :param cluster_id: optional cluster id substituted into the href.
    :param shard_id: optional shard id substituted into the href.
    :param router_id: optional router id substituted into the href.
    :param self_rel: when True the link's rel is reported as 'self'.
    :return: a new link dict with 'href' and 'rel' filled in.
    """
    clusters_href = '/v1/sharded_clusters'
    link = _SHARDED_CLUSTER_LINKS[rel].copy()
    # Format with an explicit mapping rather than format(**locals()),
    # which is fragile: it exposes every local name (including `link`
    # itself) to the template and breaks if a local is renamed.
    link['href'] = link['href'].format(
        clusters_href=clusters_href,
        cluster_id=cluster_id,
        shard_id=shard_id,
        router_id=router_id,
        rel=rel,
    )
    link['rel'] = 'self' if self_rel else rel
    return link
def encode(self, delimiter=';'):
    """Encode a command string from message.

    Joins the message fields with ``delimiter`` and appends a newline.
    On a ValueError (e.g. a type/sub_type that cannot be converted to
    int) an error is logged and None is returned.
    """
    try:
        parts = [self.node_id,
                 self.child_id,
                 int(self.type),
                 self.ack,
                 int(self.sub_type),
                 self.payload]
        return delimiter.join(str(part) for part in parts) + '\n'
    except ValueError:
        _LOGGER.error('Error encoding message to gateway')
def token_required(view_func):
    """
    Decorator which ensures that one of the WATCHMAN_TOKENS is provided if set.

    WATCHMAN_TOKEN_NAME can also be set if the token GET parameter must be
    customized.
    """
    def _parse_auth_header(auth_header):
        """
        Parse the `Authorization` header.

        Expected format: `WATCHMAN-TOKEN Token="ABC123"`
        """
        # TODO: Figure out full set of allowed characters
        # http://stackoverflow.com/questions/19028068/illegal-characters-in-http-headers
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
        # Use a raw string: '\w' in a plain literal is an invalid escape
        # sequence (DeprecationWarning; SyntaxWarning from Python 3.12).
        reg = re.compile(r'(\w+)[=] ?"?([\w-]+)"?')
        header_dict = dict(reg.findall(auth_header))
        return header_dict['Token']

    def _get_passed_token(request):
        """
        Try to get the passed token, starting with the header and falling
        back to the `GET` param.
        """
        try:
            auth_header = request.META['HTTP_AUTHORIZATION']
            token = _parse_auth_header(auth_header)
        except KeyError:
            # Raised both when the header is absent and when it lacks a
            # Token entry; fall back to the query parameter.
            token = request.GET.get(settings.WATCHMAN_TOKEN_NAME)
        return token

    def _validate_token(request):
        # No tokens configured means the view is open to everyone.
        if settings.WATCHMAN_TOKENS:
            watchman_tokens = settings.WATCHMAN_TOKENS.split(',')
        elif settings.WATCHMAN_TOKEN:
            watchman_tokens = [settings.WATCHMAN_TOKEN, ]
        else:
            return True
        return _get_passed_token(request) in watchman_tokens

    @csrf_exempt
    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        if _validate_token(request):
            return view_func(request, *args, **kwargs)
        return HttpResponseForbidden()
    return _wrapped_view
def read_csi_node(self, name, **kwargs):
    """
    read the specified CSINode

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_csi_node(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the CSINode (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
    :return: V1beta1CSINode
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the (data, status, headers) tuple down to data.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand the request thread straight back.
        return self.read_csi_node_with_http_info(name, **kwargs)
    # Synchronous: execute and return the payload.
    data = self.read_csi_node_with_http_info(name, **kwargs)
    return data
def store_sm(smodel, filename, monitor):
    """Store a source model's groups, sources and geometries in HDF5.

    :param smodel: a :class:`openquake.hazardlib.nrml.SourceModel` instance
    :param filename: path to an hdf5 file (cache_XXX.hdf5)
    :param monitor: a Monitor instance with an .hdf5 attribute
    """
    h5 = monitor.hdf5
    with monitor('store source model'):
        sources = h5['source_info']
        source_geom = h5['source_geom']
        # Running offset into the geometry dataset; each source records
        # its [gid, gid + n) slice.
        gid = len(source_geom)
        for sg in smodel:
            if filename:
                # Also cache the whole source group in a separate file.
                with hdf5.File(filename, 'r+') as hdf5cache:
                    hdf5cache['grp-%02d' % sg.id] = sg
            srcs = []
            geoms = []
            for src in sg:
                srcgeom = src.geom()
                n = len(srcgeom)
                geom = numpy.zeros(n, point3d)
                geom['lon'], geom['lat'], geom['depth'] = srcgeom.T
                srcs.append((sg.id, src.source_id, src.code, gid, gid + n,
                             src.num_ruptures, 0, 0, 0))
                geoms.append(geom)
                gid += n
            if geoms:
                hdf5.extend(source_geom, numpy.concatenate(geoms))
            if sources:
                hdf5.extend(sources, numpy.array(srcs, source_info_dt))
def set_share_path(self, share_path):
    """Set application location for this resource provider.

    @param share_path: a UTF-8 encoded, unquoted byte string.
    @raise ValueError: if ``share_path`` is non-empty but not rooted at
        '/', or has a trailing slash.
    """
    # Validate with explicit raises instead of assert: assertions are
    # stripped when Python runs with -O, silently disabling the checks.
    if share_path and not share_path.startswith("/"):
        raise ValueError("share_path must be '' or start with '/'")
    if share_path == "/":
        share_path = ""  # This allows to code 'absPath = share_path + path'
    if share_path.endswith("/"):
        raise ValueError("share_path must not end with '/'")
    self.share_path = share_path
def find_this(search, source=SOURCE):
    """Take a string and a filename path string and return the found value.

    Looks for a ``__<search>__ = "value"`` assignment in ``source`` and
    returns the value, or "" when either argument is falsy or the
    assignment is not present.
    """
    print("Searching for {what}.".format(what=search))
    if not search or not source:
        print("Not found on source: {what}.".format(what=search))
        return ""
    # match() returns None when the pattern is absent; guard against it
    # instead of crashing with AttributeError on .group(1).
    found = re.compile(r'.*__{what}__ = "(.*?)"'.format(
        what=search), re.S).match(source)
    if found is None:
        print("Not found on source: {what}.".format(what=search))
        return ""
    return str(found.group(1)).strip()
def _cutadapt_trim_cmd(fastq_files, quality_format, adapters, out_files, data):
    """Trimming with cutadapt, using version installed with bcbio-nextgen.

    Returns the existing output files when they are all present, otherwise
    the command produced by the paired-end or single-end helper.
    """
    # Idempotency: skip the work if every expected output already exists.
    if all([utils.file_exists(x) for x in out_files]):
        return out_files
    if quality_format == "illumina":
        quality_base = "64"
    else:
        quality_base = "33"

    # --times=2 tries twice remove adapters which will allow things like:
    # realsequenceAAAAAAadapter to remove both the poly-A and the adapter
    # this behavior might not be what we want; we could also do two or
    # more passes of cutadapt
    # Use the cutadapt binary installed next to the current interpreter.
    cutadapt = os.path.join(os.path.dirname(sys.executable), "cutadapt")
    adapter_cmd = " ".join(map(lambda x: "-a " + x, adapters))
    ropts = " ".join(str(x) for x in
                     config_utils.get_resources("cutadapt", data["config"]).get("options", []))
    base_cmd = ("{cutadapt} {ropts} --times=2 --quality-base={quality_base} "
                "--quality-cutoff=5 --format=fastq "
                "{adapter_cmd} ").format(**locals())
    if len(fastq_files) == 2:
        # support for the single-command paired trimming introduced in
        # cutadapt 1.8: repeat the adapters with -A for the second read
        adapter_cmd = adapter_cmd.replace("-a ", "-A ")
        base_cmd += "{adapter_cmd} ".format(adapter_cmd=adapter_cmd)
        return _cutadapt_pe_cmd(fastq_files, out_files, quality_format, base_cmd, data)
    else:
        return _cutadapt_se_cmd(fastq_files, out_files, base_cmd, data)
def disk_vmag(hemi, retinotopy='any', to=None, **kw):
    '''
    disk_vmag(mesh) yields the visual magnification based on the projection of disks on the cortical
    surface into the visual field.

    All options accepted by mag_data() are accepted by disk_vmag().
    '''
    mdat = mag_data(hemi, retinotopy=retinotopy, **kw)
    # A vector of mag-data objects: recurse over each element.
    # NOTE(review): both recursive branches delegate to face_vmag, not
    # disk_vmag -- confirm this is intentional.
    if pimms.is_vector(mdat): return tuple([face_vmag(m, to=to) for m in mdat])
    elif pimms.is_vector(mdat.keys(), 'int'):
        # Integer-keyed map: build a lazy map so the per-key computation
        # only happens on access (k is bound via curry, not the closure).
        return pimms.lazy_map({k: curry(lambda k: face_vmag(mdat[k], to=to), k)
                               for k in six.iterkeys(mdat)})
    #TODO: implement the disk_vmag calculation using mdat
    # convert to the appropriate type according to the to param
    raise NotImplementedError()
def dispatch_hook(cls, _pkt=None, *args, **kargs):
    """
    Returns the right parameter set class.
    """
    # Without packet bytes we cannot dispatch; fall back to raw_layer.
    if _pkt is None:
        return conf.raw_layer
    first_byte = orb(_pkt[0])
    # Map the parameter type byte to a class name, then resolve it;
    # unknown types or unresolvable names both fall back to raw_layer.
    return globals().get(_param_set_cls.get(first_byte), conf.raw_layer)
def literal_matches_objectliteral(v1: Literal, v2: ShExJ.ObjectLiteral) -> bool:
    """ Compare :py:class:`rdflib.Literal` with :py:class:`ShExJ.objectLiteral` """
    datatype = iriref_to_uriref(v2.type)
    language = str(v2.language) if v2.language else None
    # Build an rdflib Literal from the ShExJ literal and compare equality.
    return v1 == Literal(str(v2.value), datatype=datatype, lang=language)
def preserve_builtin_query_params(url, request=None):
    """
    Given an incoming request, and an outgoing URL representation,
    append the value of any built-in query parameters.
    """
    # Nothing to carry over without a request.
    if request is None:
        return url
    # The format override is currently the only built-in parameter.
    for param in (api_settings.URL_FORMAT_OVERRIDE,):
        if param and param in request.GET:
            url = replace_query_param(url, param, request.GET[param])
    return url
def consult_filters(self, url_info: URLInfo, url_record: URLRecord, is_redirect: bool=False) \
        -> Tuple[bool, str, dict]:
    '''Consult the URL filter.

    Args:
        url_record: The URL record.
        is_redirect: Whether the request is a redirect and it is
            desired that it spans hosts.

    Returns
        tuple:

        1. bool: The verdict
        2. str: A short reason string: nofilters, filters, redirect
        3. dict: The result from :func:`DemuxURLFilter.test_info`
    '''
    # No filter installed: everything passes.
    if not self._url_filter:
        return True, 'nofilters', None

    test_info = self._url_filter.test_info(url_info, url_record)
    verdict = test_info['verdict']

    if verdict:
        return verdict, 'filters', test_info

    # A redirect is allowed to span hosts when the span-hosts rule was
    # the only filter that rejected the URL.
    if is_redirect and self.is_only_span_hosts_failed(test_info):
        return True, 'redirect', test_info

    return verdict, 'filters', test_info
def set_value(self, attribute, section, value):
    """
    Sets requested attribute value.

    Usage::

        >>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", "[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
        >>> sections_file_parser = SectionsFileParser()
        >>> sections_file_parser.content = content
        >>> sections_file_parser.parse()
        <foundations.parsers.SectionsFileParser object at 0x109304209>
        >>> sections_file_parser.set_value("Attribute 3", "Section C", "Value C")
        True

    :param attribute: Attribute name.
    :type attribute: unicode
    :param section: Section containing the searched attribute.
    :type section: unicode
    :param value: Attribute value.
    :type value: object
    :return: Definition success.
    :rtype: bool
    """
    # Create the section on demand, preserving insertion order when the
    # parser was configured to do so.
    if not self.section_exists(section):
        LOGGER.debug("> Adding '{0}' section.".format(section))
        self.__sections[section] = OrderedDict() if self.__preserve_order else dict()
    self.__sections[section][attribute] = value
    return True
def get_assets_by_repository(self, repository_id):
    """Gets the list of ``Assets`` associated with a ``Repository``.

    arg:    repository_id (osid.id.Id): ``Id`` of the ``Repository``
    return: (osid.repository.AssetList) - list of related assets
    raise:  NotFound - ``repository_id`` is not found
    raise:  NullArgument - ``repository_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resources_by_bin
    provider_manager = self._get_provider_manager('REPOSITORY', local=True)
    session = provider_manager.get_asset_lookup_session_for_repository(
        repository_id, proxy=self._proxy)
    # Restrict the lookup to this repository only.
    session.use_isolated_repository_view()
    return session.get_assets()
def get_schemas():
    """Return a dict of schema names mapping to a Schema.

    The schema is of type schul_cloud_resources_api_v1.schema.Schema
    """
    # Every entry under JSON_PATH becomes a schema unless blacklisted.
    return {name: Schema(name)
            for name in os.listdir(JSON_PATH)
            if name not in NO_SCHEMA}
def to_unicode(s, encoding=None, errors='strict'):
    """
    Make unicode string from any value

    :param s: value to convert
    :param encoding: codec name; defaults to utf-8 when falsy
    :param errors: error handling scheme passed to decode
    :return: unicode
    """
    if not encoding:
        encoding = 'utf-8'
    if is_unicode(s):
        return s
    if is_strlike(s):
        return s.decode(encoding, errors)
    # Non-string values: stringify, decoding the result on Python 2 only.
    if six.PY2:
        return str(s).decode(encoding, errors)
    return str(s)
def _pseudoinverse(self, A, tol=1.0e-10):
    """
    Compute the Moore-Penrose pseudoinverse.

    REQUIRED ARGUMENTS
      A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed

    RETURN VALUES
      Ainv (np KxK matrix) - the pseudoinverse

    OPTIONAL VALUES
      tol - the tolerance (relative to largest magnitude singular value)
        below which singular values are to not be included in forming the
        pseudoinverse (default: 1.0e-10)

    NOTES
      This implementation is provided because the 'pinv' function of np is
      broken in the version we were using.

    TODO
      Can we get rid of this and use np.linalg.pinv instead?
    """
    # DEBUG
    # TODO: Should we use pinv, or _pseudoinverse?
    # return np.linalg.pinv(A)

    # Get size
    [M, N] = A.shape
    if N != M:
        raise DataError("pseudoinverse can only be computed for square matrices: dimensions were %d x %d" % (
            M, N))
    # Make sure A contains no nan.
    if(np.any(np.isnan(A))):
        print("attempted to compute pseudoinverse of A =")
        print(A)
        raise ParameterError("A contains nan.")
    # DEBUG
    # Dead branch kept from the original: diagonal loading is disabled.
    diagonal_loading = False
    if diagonal_loading:
        # Modify matrix by diagonal loading.
        eigs = linalg.eigvalsh(A)
        most_negative_eigenvalue = eigs.min()
        if (most_negative_eigenvalue < 0.0):
            print("most negative eigenvalue = %e" % most_negative_eigenvalue)
            # Choose loading value.
            gamma = -most_negative_eigenvalue * 1.05
            # Modify Theta by diagonal loading
            A += gamma * np.eye(A.shape[0])
    # Compute SVD of A.
    [U, S, Vt] = linalg.svd(A)
    # Compute pseudoinverse by taking square root of nonzero singular
    # values.
    # NOTE(review): the loop actually sums 1/S[k] terms for singular
    # values above the relative tolerance -- no square root is taken;
    # the comment above appears stale. Confirm before changing.
    Ainv = np.matrix(np.zeros([M, M], dtype=np.float64))
    for k in range(M):
        if (abs(S[k]) > tol * abs(S[0])):
            Ainv += (1.0/S[k]) * np.outer(U[:, k], Vt[k, :]).T
    return Ainv
def render_entry(entry_id, slug_text='', category=''):
    """ Render an entry page.

    Arguments:

    entry_id -- The numeric ID of the entry to render
    slug_text -- The expected URL slug text
    category -- The expected category
    """
    # pylint: disable=too-many-return-statements

    # check if it's a valid entry
    record = model.Entry.get(id=entry_id)
    if not record:
        # It's not a valid entry, so see if it's a redirection
        path_redirect = get_redirect()
        if path_redirect:
            return path_redirect

        logger.info("Attempted to retrieve nonexistent entry %d", entry_id)
        raise http_error.NotFound("No such entry")

    # see if the file still exists
    if not os.path.isfile(record.file_path):
        # The backing file is gone; invalidate the stale index record.
        expire_record(record)

        # See if there's a redirection
        path_redirect = get_redirect()
        if path_redirect:
            return path_redirect

        raise http_error.NotFound("No such entry")

    # Show an access denied error if the entry has been set to draft mode
    if record.status == model.PublishStatus.DRAFT.value:
        raise http_error.Forbidden("Entry not available")
    # Show a gone error if the entry has been deleted
    if record.status == model.PublishStatus.GONE.value:
        raise http_error.Gone()

    # check if the canonical URL matches
    if record.category != category or record.slug_text != slug_text:
        # This could still be a redirected path...
        path_redirect = get_redirect()
        if path_redirect:
            return path_redirect

        # Redirect to the canonical URL
        return redirect(url_for('entry',
                                entry_id=entry_id,
                                category=record.category,
                                slug_text=record.slug_text))

    # if the entry canonically redirects, do that now
    entry_redirect = record.redirect_url
    if entry_redirect:
        return redirect(entry_redirect)

    # Pick the most specific template: per-entry, then per-category,
    # then the default 'entry' template.
    entry_template = (record.entry_template
                      or Category(category).get('Entry-Template')
                      or 'entry')

    tmpl = map_template(category, entry_template)
    if not tmpl:
        raise http_error.BadRequest("Missing entry template")

    # Get the viewable entry
    entry_obj = Entry(record)

    # does the entry-id header mismatch? If so the old one is invalid
    if int(entry_obj.get('Entry-ID')) != record.id:
        expire_record(record)
        return redirect(url_for('entry', entry_id=int(entry_obj.get('Entry-Id'))))

    rendered, etag = render_publ_template(
        tmpl,
        _url_root=request.url_root,
        entry=entry_obj,
        category=Category(category))

    # Honor conditional requests via the rendered content's ETag.
    if request.if_none_match.contains(etag):
        return 'Not modified', 304

    return rendered, {'Content-Type': mime_type(tmpl),
                      'ETag': etag}
def check_version(mod, required):
    """Require minimum version of module using ``__version__`` member."""
    # Compare at most the first three numeric components.
    actual = tuple(int(part) for part in mod.__version__.split('.')[:3])
    if actual < required:
        minimum = '.'.join(str(part) for part in required)
        raise ImproperlyConfigured(
            "Module \"%s\" version (%s) must be >= %s." %
            (mod.__name__, mod.__version__, minimum))
def get_lead(self, lead_id):
    """
    Get a specific lead saved on your account.

    :param lead_id: Id of the lead to search. Must be defined.
    :return: Lead found as a dict.
    """
    # Build the leads endpoint URL and query with the account defaults.
    endpoint = self.base_endpoint.format('leads/' + str(lead_id))
    return self._query_hunter(endpoint, self.base_params)
def visit_decorators(self, node, parent):
    """visit a Decorators node by returning a fresh instance of it"""
    # /!\ node is actually a _ast.FunctionDef node while
    # parent is an astroid.nodes.FunctionDef node
    newnode = nodes.Decorators(node.lineno, node.col_offset, parent)
    children = [self.visit(decorator, newnode)
                for decorator in node.decorator_list]
    newnode.postinit(children)
    return newnode
def check_webhook_validation(app_configs=None, **kwargs):
    """
    Check that DJSTRIPE_WEBHOOK_VALIDATION is valid
    """
    from . import settings as djstripe_settings

    validation_options = ("verify_signature", "retrieve_event")
    options_text = ", ".join(validation_options)
    messages = []
    validation = djstripe_settings.WEBHOOK_VALIDATION

    if validation is None:
        # Disabled validation is allowed but worth warning about.
        messages.append(
            checks.Warning(
                "Webhook validation is disabled, this is a security risk if the webhook view is enabled",
                hint="Set DJSTRIPE_WEBHOOK_VALIDATION to one of {}".format(options_text),
                id="djstripe.W004",
            )
        )
    elif validation == "verify_signature":
        # Signature verification is useless without the shared secret.
        if not djstripe_settings.WEBHOOK_SECRET:
            messages.append(
                checks.Critical(
                    "DJSTRIPE_WEBHOOK_VALIDATION='verify_signature' but DJSTRIPE_WEBHOOK_SECRET is not set",
                    hint="Set DJSTRIPE_WEBHOOK_SECRET or set DJSTRIPE_WEBHOOK_VALIDATION='retrieve_event'",
                    id="djstripe.C006",
                )
            )
    elif validation not in validation_options:
        messages.append(
            checks.Critical(
                "DJSTRIPE_WEBHOOK_VALIDATION is invalid",
                hint="Set DJSTRIPE_WEBHOOK_VALIDATION to one of {} or None".format(options_text),
                id="djstripe.C007",
            )
        )
    return messages
def _recursive_split(self, bbox, zoom_level, column, row):
"""Method that recursively creates bounding boxes of OSM grid that intersect the area.
:param bbox: Bounding box
:type bbox: BBox
:param zoom_level: OSM zoom level
:type zoom_level: int
:param column: Column in the OSM grid
:type column: int
:param row: Row in the OSM grid
:type row: int
"""
if zoom_level == self.zoom_level:
self.bbox_list.append(bbox)
self.info_list.append({'zoom_level': zoom_level,
'index_x': column,
'index_y': row})
return
bbox_partition = bbox.get_partition(2, 2)
for i, j in itertools.product(range(2), range(2)):
if self._intersects_area(bbox_partition[i][j]):
self._recursive_split(bbox_partition[i][j], zoom_level + 1, 2 * column + i, 2 * row + 1 - j) | Method that recursively creates bounding boxes of OSM grid that intersect the area.
:param bbox: Bounding box
:type bbox: BBox
:param zoom_level: OSM zoom level
:type zoom_level: int
:param column: Column in the OSM grid
:type column: int
:param row: Row in the OSM grid
:type row: int |
def dump_normals(dataset_dir, data_dir, dataset, root=None, compress=True):
    """dump vtkjs normal vectors

    Writes the dataset's point normals (when present) as a data array and
    registers them as the active normals in ``root['pointData']``.
    """
    if root is None:
        root = {}
    normals = dataset.GetPointData().GetNormals()
    if normals:
        dumped_array = dump_data_array(dataset_dir, data_dir, normals, {}, compress)
        # Create the pointData section on demand: the original code
        # assumed the caller had pre-populated it and raised KeyError
        # otherwise (notably for the default root=None case).
        point_data = root.setdefault('pointData', {'arrays': []})
        point_data.setdefault('arrays', [])
        point_data['activeNormals'] = len(point_data['arrays'])
        point_data['arrays'].append({'data': dumped_array})
def calculateDatasets(self, scene, axes, datasets):
    """
    Builds the datasets for this renderer.  Each renderer will need to
    subclass and implement this method, otherwise, no data will be
    shown in the chart.

    :param scene    | <XChartScene>
           axes     | [<XChartAxis>, ..]
           datasets | [<XChartDataset>, ..]
    """
    items = self.calculateDatasetItems(scene, datasets)
    if not items:
        scene.clear()
        return

    rect = self.buildData('axis_rect')
    half_size = self.maximumBarSize() / 2.0

    for dataset, item in items.items():
        path = QPainterPath()
        subpaths = []

        for value in dataset.values():
            pos = self.pointAt(axes, value)
            # Round the bar's top corners, capping the radius at 8px.
            radius = min(rect.bottom() - pos.y(), 8)
            subpath = QPainterPath()

            # create a vertical bar graph
            if self.orientation() == Qt.Vertical:
                subpath.moveTo(pos.x() - half_size, rect.bottom())
                subpath.lineTo(pos.x() - half_size, pos.y() + radius)
                subpath.quadTo(pos.x() - half_size, pos.y(),
                               pos.x() - half_size + radius, pos.y())
                subpath.lineTo(pos.x() + half_size - radius, pos.y())
                subpath.quadTo(pos.x() + half_size, pos.y(),
                               pos.x() + half_size, pos.y() + radius)
                subpath.lineTo(pos.x() + half_size, rect.bottom())
                subpath.lineTo(pos.x() - half_size, rect.bottom())

            # create a horizontal bar graph
            else:
                subpath.moveTo(rect.left(), pos.y() - half_size)
                subpath.lineTo(pos.x(), pos.y() - half_size)
                subpath.lineTo(pos.x(), pos.y() + half_size)
                subpath.lineTo(rect.left(), pos.y() + half_size)
                subpath.lineTo(rect.left(), pos.y() - half_size)

            path.addPath(subpath)
            subpaths.append(subpath)

        item.setPath(path)
        # Keep the per-value subpaths around for later hit testing/usage.
        item.setBuildData('subpaths', subpaths)
def add_missing_children(required_children, element_children):
    """Determine if there are elements not in the children
    that need to be included as blank elements in the form.

    :param required_children: iterable of tag names that must be present.
    :param element_children: list of existing child elements (each with
        a ``tag`` attribute); extended in place with blank elements.
    :return: element_children, with blanks appended for missing tags.
    """
    element_tags = [element.tag for element in element_children]
    # Loop through the elements that should be in the form.
    for contained_element in required_children:
        # If the element doesn't exist in the form,
        # add the element to the children.
        if contained_element not in element_tags:
            try:
                added_child = PYUNTL_DISPATCH[contained_element](content='')
            except Exception:
                # The dispatch class may not accept a `content` kwarg;
                # fall back to a no-argument construction. (Was a bare
                # `except:`, which also swallowed KeyboardInterrupt.)
                added_child = PYUNTL_DISPATCH[contained_element]()
            element_children.append(added_child)
    return element_children
def _get_metadata_for_galaxies(
self):
"""get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``_get_metadata_for_galaxies`` method')
total, batches = self._count_galaxies_requiring_metadata()
print "%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals()
totalBatches = self.batches
thisCount = 0
# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE
while self.total:
thisCount += 1
self._get_3000_galaxies_needing_metadata()
dictList = self._query_ned_and_add_results_to_database(thisCount)
self.add_data_to_database_table(
dictList=dictList,
createStatement=False
)
self._count_galaxies_requiring_metadata()
self.log.debug('completed the ``_get_metadata_for_galaxies`` method')
return None | get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring |
def parse_dformat(dformat, check=True):
    """Return `dformat` or raise if it is not 'dense' or 'sparse'"""
    valid_formats = ('dense', 'sparse')
    # Validation can be bypassed entirely with check=False.
    if check and dformat not in valid_formats:
        raise IOError(
            "{} is a bad features format, please choose 'dense' or 'sparse'"
            .format(dformat))
    return dformat
def login_with_google(self, email, oauth2_token, **kwargs):
    """Login to Todoist using Google's oauth2 authentication.

    :param email: The user's Google email address.
    :type email: str
    :param oauth2_token: The user's Google oauth2 token.
    :type oauth2_token: str
    :param auto_signup: If ``1`` register an account automatically.
    :type auto_signup: int
    :param full_name: The full name to use if the account is registered
        automatically. If no name is given an email based nickname is used.
    :type full_name: str
    :param timezone: The timezone to use if the account is registered
        automatically. If no timezone is given one is chosen based on the
        user's IP address.
    :type timezone: str
    :param lang: The user's language.
    :type lang: str
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`

    >>> from pytodoist.api import TodoistAPI
    >>> api = TodoistAPI()
    >>> oauth2_token = 'oauth2_token'  # Get this from Google.
    >>> response = api.login_with_google('john.doe@gmail.com',
    ...                                  oauth2_token)
    >>> user_info = response.json()
    >>> full_name = user_info['full_name']
    >>> print(full_name)
    John Doe
    """
    params = {
        'email': email,
        'oauth2_token': oauth2_token
    }
    # Account auto-creation mutates server state, so it must go through
    # POST; a plain login is a GET.
    if kwargs.get('auto_signup', 0) == 1:
        return self._post('login_with_google', params, **kwargs)
    return self._get('login_with_google', params, **kwargs)
def check_alive_instances(self):
    """Detect dead external modules and over-filled module queues.

    Any instance found in a bad state has its queues cleared and is
    scheduled for a restart; instances already scheduled are skipped.

    :return: None
    """
    for instance in self.instances:
        # Already scheduled for restart: nothing more to check.
        if instance in self.to_restart:
            continue

        # External modules run in their own process; a dead process
        # means the module must be restarted.
        if instance.is_external and instance.process and not instance.process.is_alive():
            logger.error("The external module %s died unexpectedly!", instance.name)
            logger.info("Setting the module %s to restart", instance.name)
            # Its queues are useless now, drop their content.
            instance.clear_queues(self.daemon.sync_manager)
            self.set_to_restart(instance)
            continue

        # Queue-size watchdog: a huge backlog hints at a stuck module.
        # A configured maximum of 0 disables this check entirely.
        if self.daemon.max_queue_size == 0:
            continue

        pending = 0
        try:
            pending = instance.to_q.qsize()
        except Exception:  # pylint: disable=broad-except
            # qsize() may be unavailable on some platforms; treat as 0.
            pass

        if pending > self.daemon.max_queue_size:
            logger.error("The module %s has a too important queue size (%s > %s max)!",
                         instance.name, pending, self.daemon.max_queue_size)
            logger.info("Setting the module %s to restart", instance.name)
            # Its queues are useless now, drop their content.
            instance.clear_queues(self.daemon.sync_manager)
            self.set_to_restart(instance)
If not, log error and try to restart it
:return: None |
def relabel_groups_masked(group_idx, keep_group):
    """
    Drop the groups flagged False in ``keep_group`` and renumber the rest.

    Example::

        group_idx:  [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
        keep_group: [0 1 0 1 1 1]        # indexed by group number 0..5
        result:     [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]

    In words: group 2 is removed, and groups 3, 4 and 5 are renumbered to
    2, 3 and 4 to close the gap.  Group 4 never occurs in the input, but the
    mask says to keep it, so group 5 only moves down one slot.  Elements of
    removed groups map to group 0.  ``keep_group[0]`` has no effect: the
    zero group can never be removed.  ``keep_group`` should be bool and
    ``group_idx`` int; values in ``group_idx`` may appear in any order.
    """
    mask = keep_group.astype(bool, copy=not keep_group[0])
    # Force the zero group to "kept"; a private copy was made above exactly
    # when we need to mutate here, so the caller's array is never touched.
    if not mask[0]:
        mask[0] = True
    new_labels = np.zeros(mask.size, dtype=group_idx.dtype)
    new_labels[mask] = np.arange(np.count_nonzero(mask))
    return new_labels[group_idx]
0 1 2 3 4 5
keep_group: [0 1 0 1 1 1]
ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]
Description of above in words: remove group 2, and relabel group 3,4, and 5
to be 2, 3 and 4 respectively, in order to fill the gap. Note that group 4 was never used
in the input group_idx, but the user supplied mask said to keep group 4, so group
5 is only moved up by one place to fill the gap created by removing group 2.
That is, the mask describes which groups to remove,
the remaining groups are relabeled to remove the gaps created by the falsy
elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers
to the zero group which cannot be "removed".
``keep_group`` should be bool and ``group_idx`` int.
Values in ``group_idx`` can be any order, and |
def resolve_image_as_pil(self, image_url, coords=None):
    """
    Resolve an image URL to a PIL image.

    Opened images and their OpenCV conversions are cached per URL; the
    download/open now happens only on a cache miss (previously the file
    was re-downloaded on every call even when the image was cached).

    Args:
        image_url : URL of the image to resolve
        coords (list) : Coordinates of the bounding box to cut from the image

    Returns:
        Image or region in image as PIL.Image
    """
    if image_url not in self.image_cache['pil']:
        # Cache miss: fetch the file (via METS entry if present) and open it.
        files = self.mets.find_files(url=image_url)
        if files:
            image_filename = self.download_file(files[0]).local_filename
        else:
            image_filename = self.download_url(image_url)
        self.image_cache['pil'][image_url] = Image.open(image_filename)
    pil_image = self.image_cache['pil'][image_url]
    if coords is None:
        return pil_image
    if image_url not in self.image_cache['cv2']:
        log.debug("Converting PIL to OpenCV: %s", image_url)
        # Grayscale ('L') and bilevel ('1') images need the GRAY2BGR
        # conversion; bilevel additionally needs an explicit uint8 cast.
        color_conversion = cv2.COLOR_GRAY2BGR if pil_image.mode in ('1', 'L') else cv2.COLOR_RGB2BGR
        pil_as_np_array = np.array(pil_image).astype('uint8') if pil_image.mode == '1' else np.array(pil_image)
        self.image_cache['cv2'][image_url] = cv2.cvtColor(pil_as_np_array, color_conversion)
    cv2_image = self.image_cache['cv2'][image_url]
    poly = np.array(coords, np.int32)
    log.debug("Cutting region %s from %s", coords, image_url)
    # Axis-aligned bounding box of the polygon: column 1 is y, column 0 is x.
    region_cut = cv2_image[
        np.min(poly[:, 1]):np.max(poly[:, 1]),
        np.min(poly[:, 0]):np.max(poly[:, 0])
    ]
    return Image.fromarray(region_cut)
Args:
coords (list) : Coordinates of the bounding box to cut from the image
Returns:
Image or region in image as PIL.Image |
def transform_inverse(im_tensor, mean, std):
    """
    Transform an mxnet CHW image tensor back into an ordinary HWC RGB image.

    Note: despite the old docstring mentioning a batch axis, this function
    expects a SINGLE image — the assert requires ``im_tensor.shape[0] == 3``
    (the channel axis), and the transpose maps (C, H, W) -> (H, W, C).

    :param im_tensor: [channel(RGB), height, width] array for one image
    :param mean: [RGB pixel mean]
    :param std: [RGB pixel std var]
    :return: im [height, width, channel(RGB)] uint8 array
    """
    assert im_tensor.shape[0] == 3
    im = im_tensor.transpose((1, 2, 0))
    # Undo the normalization (x - mean) / std applied at preprocessing time.
    im = im * std + mean
    im = im.astype(np.uint8)
    return im
im_tensor is limited to one image
:param im_tensor: [batch, channel, height, width]
:param mean: [RGB pixel mean]
:param std: [RGB pixel std var]
:return: im [height, width, channel(RGB)] |
def isopen(self) -> bool:
    """Return whether the backing file is present and still open."""
    backing = self._file
    # No file object at all: definitely not open.
    if backing is None:
        return False
    # Probing the id attribute reveals whether the handle is still open.
    return bool(backing.id)
def Call(method,url,payload=None,session=None,debug=False):
    """Execute v2 API call.

    :param method: HTTP method name ("GET", "POST", ...) passed to requests
    :param url: URL paths associated with the API call
    :param payload: dict containing all parameters to submit with POST call
    :param session: optional dict with 'token' and 'http_session' keys; when
        omitted the module-level login token and requests session are used
        (logging in first if necessary)
    :param debug: if True, dump the prepared request and the response
    :returns: decoded API json result
    """
    if session is not None:
        token = session['token']
        http_session = session['http_session']
    else:
        # Fall back to module-level credentials, logging in on first use.
        if not clc._LOGIN_TOKEN_V2:
            API._Login()
        token = clc._LOGIN_TOKEN_V2
        http_session = clc._REQUESTS_SESSION
    if payload is None:
        payload = {}
    # If executing refs provided in API they are abs paths,
    # Else refs we build in the sdk are relative
    if url[0]=='/': fq_url = "%s%s" % (clc.defaults.ENDPOINT_URL_V2,url)
    else: fq_url = "%s/v2/%s" % (clc.defaults.ENDPOINT_URL_V2,url)
    http_session.headers.update({'Authorization': "Bearer %s" % token})
    # NOTE(review): `basestring` exists only on Python 2 — this module
    # appears to target Python 2; confirm before running under Python 3.
    if isinstance(payload, basestring): http_session.headers['content-type'] = "Application/json" # added for server ops with str payload
    else: http_session.headers['content-type'] = "application/x-www-form-urlencoded"
    if method=="GET":
        # GET sends the payload as query-string parameters.
        r = http_session.request(method,fq_url,
                                 params=payload,
                                 verify=API._ResourcePath('clc/cacert.pem'))
    else:
        # All other methods send the payload in the request body.
        r = http_session.request(method,fq_url,
                                 data=payload,
                                 verify=API._ResourcePath('clc/cacert.pem'))
    if debug:
        API._DebugRequest(request=requests.Request(method,fq_url,data=payload,headers=http_session.headers).prepare(),
                          response=r)
    if r.status_code>=200 and r.status_code<300:
        try:
            return(r.json())
        # NOTE(review): bare except — presumably guards empty/non-JSON 2xx
        # bodies; narrowing to ValueError would be safer. Confirm intent.
        except:
            return({})
    else:
        try:
            e = clc.APIFailedResponse("Response code %s. %s %s %s" %
                                      (r.status_code,r.json()['message'],method,fq_url))
            e.response_status_code = r.status_code
            e.response_json = r.json()
            e.response_text = r.text
            raise(e)
        except clc.APIFailedResponse:
            raise
        except:
            # Response body was not JSON (or lacked 'message'): fall back
            # to the raw response text in the error message.
            e = clc.APIFailedResponse("Response code %s. %s. %s %s" %
                                      (r.status_code,r.text,method,fq_url))
            e.response_status_code = r.status_code
            e.response_json = {}	# or should this be None?
            e.response_text = r.text
            raise(e)
:param url: URL paths associated with the API call
:param payload: dict containing all parameters to submit with POST call
:returns: decoded API json result |
def rdlevenshtein_norm(source, target):
    """Calculates the normalized restricted Damerau-Levenshtein distance
    (a.k.a. the normalized optimal string alignment distance) between two
    string arguments. The result will be a float in the range [0.0, 1.0], with
    1.0 signifying the maximum distance between strings with these lengths.

    Two empty strings are identical, so their distance is 0.0 (previously
    this case raised ZeroDivisionError).
    """
    # The max distance is always just the length of the longer string, so
    # this is used to normalize the result before returning it.
    longest = max(len(source), len(target))
    if longest == 0:
        return 0.0
    # Compute restricted Damerau-Levenshtein distance using helper function.
    distance = _levenshtein_compute(source, target, True)
    return float(distance) / longest
(a.k.a. the normalized optimal string alignment distance) between two
string arguments. The result will be a float in the range [0.0, 1.0], with
1.0 signifying the maximum distance between strings with these lengths |
def preprocess_cell(
    self, cell: "NotebookNode", resources: dict, cell_index: int
) -> Tuple["NotebookNode", dict]:
    """Apply a transformation on each cell.

    Extracts cell attachments of the configured mime types into
    ``resources["outputs"]`` and rewrites ``attachment:`` references in the
    cell source to point at the extracted file names.

    Parameters
    ----------
    cell : NotebookNode cell
        Notebook cell being processed
    resources : dictionary
        Additional resources used in the conversion process. Allows
        preprocessors to pass variables into the Jinja engine.
    cell_index : int
        Index of the cell being processed (see base.py)

    Returns
    -------
    tuple
        The (possibly modified) cell and the updated resources dict.
    """
    # Get files directory if it has been specified
    output_files_dir = resources.get("output_files_dir", None)
    # Make sure outputs key exists
    if not isinstance(resources["outputs"], dict):
        resources["outputs"] = {}
    # Loop through all of the attachments in the cell
    for name, attach in cell.get("attachments", {}).items():
        orig_name = name
        # Replace percent-encoded byte sequences (e.g. "%20") so the name
        # is safe to use as a filename.
        name = re.sub(r"%[\w\d][\w\d]", "-", name)
        for mime, data in attach.items():
            if mime not in self.extract_output_types:
                continue
            # Binary files are base64-encoded, SVG is already XML
            if mime in {"image/png", "image/jpeg", "application/pdf"}:
                # data is b64-encoded as text (str, unicode),
                # we want the original bytes
                data = a2b_base64(data)
            elif sys.platform == "win32":
                # Text data: use platform line endings before encoding.
                data = data.replace("\n", "\r\n").encode("UTF-8")
            else:
                data = data.encode("UTF-8")
            filename = self.output_filename_template.format(
                cell_index=cell_index,
                name=name,
                unique_key=resources.get("unique_key", ""),
            )
            if output_files_dir is not None:
                filename = os.path.join(output_files_dir, filename)
            # A .gif attachment whose data is PNG gets a matching extension.
            if name.endswith(".gif") and mime == "image/png":
                filename = filename.replace(".gif", ".png")
            # In the resources, make the figure available via
            #   resources['outputs']['filename'] = data
            resources["outputs"][filename] = data
            # now we need to change the cell source so that it links to the
            # filename instead of `attachment:`
            attach_str = "attachment:" + orig_name
            if attach_str in cell.source:
                cell.source = cell.source.replace(attach_str, filename)
    return cell, resources
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py) |
def decode(geohash):
    """
    Turn a geohash into a ``(latitude, longitude)`` pair of strings,
    keeping only the digits the hash precision actually supports and
    stripping trailing zeroes.
    """
    latitude, longitude, lat_err, lon_err = decode_exactly(geohash)

    def _fmt(value, err):
        # The number of meaningful decimals follows from the error bound.
        digits = max(1, int(round(-log10(err)))) - 1
        text = "%.*f" % (digits, value)
        return text.rstrip('0') if '.' in text else text

    return _fmt(latitude, lat_err), _fmt(longitude, lon_err)
containing only relevant digits and with trailing zeroes removed. |
def _as_dict(self):
    """ Returns a map of column names to cleaned values.

    Fix: the dynamic-columns dict is now copied before static columns are
    merged in. Previously ``self._dynamic_columns`` was used directly, so
    every call silently polluted the instance's dynamic-column mapping
    with the static column values.
    """
    values = dict(self._dynamic_columns or {})
    for name, col in self._columns.items():
        # Missing attributes serialize as None.
        values[name] = col.to_database(getattr(self, name, None))
    return values
def module_imports_on_top_of_file(
        logical_line, indent_level, checker_state, noqa):
    r"""Place imports at the top of the file.
    Always put imports at the top of the file, just after any module comments
    and docstrings, and before module globals and constants.
    Okay: import os
    Okay: # this is a comment\nimport os
    Okay: '''this is a module docstring'''\nimport os
    Okay: r'''this is a module docstring'''\nimport os
    Okay:
    try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y
    Okay:
    try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y
    E402: a=1\nimport os
    E402: 'One string'\n"Two string"\nimport os
    E402: a=1\nfrom sys import x
    Okay: if x:\n import os
    """
    def looks_like_string_literal(text):
        # Skip an optional u/U/b/B prefix, then an optional r/R modifier.
        if text[0] in 'uUbB':
            text = text[1:]
        if text and text[0] in 'rR':
            text = text[1:]
        return bool(text) and text[0] in '"\''

    if indent_level:
        # Imports inside conditional statements or functions are allowed.
        return
    if not logical_line:
        # Empty lines and comments are allowed.
        return
    if noqa:
        return

    line = logical_line
    if line.startswith(('import ', 'from ')):
        if checker_state.get('seen_non_imports', False):
            yield 0, "E402 module level import not at top of file"
    elif re.match(DUNDER_REGEX, line):
        # Dunder assignments (e.g. __version__ = ...) may precede imports.
        return
    elif line.startswith(('try', 'except', 'else', 'finally')):
        # try/except/else/finally may be intermixed with imports in
        # order to support conditional importing.
        return
    elif looks_like_string_literal(line):
        # The first literal is a docstring, allow it. Otherwise, report error.
        if checker_state.get('seen_docstring', False):
            checker_state['seen_non_imports'] = True
        else:
            checker_state['seen_docstring'] = True
    else:
        checker_state['seen_non_imports'] = True
Always put imports at the top of the file, just after any module comments
and docstrings, and before module globals and constants.
Okay: import os
Okay: # this is a comment\nimport os
Okay: '''this is a module docstring'''\nimport os
Okay: r'''this is a module docstring'''\nimport os
Okay:
try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y
Okay:
try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y
E402: a=1\nimport os
E402: 'One string'\n"Two string"\nimport os
E402: a=1\nfrom sys import x
Okay: if x:\n import os |
def weather(self, latitude=None, longitude=None, date=None):
    # type:(float, float, datetime) -> Weather
    """
    :param float latitude: Locations latitude
    :param float longitude: Locations longitude
    :param datetime or str or int date: Date/time for historical weather data
    :raises requests.exceptions.HTTPError: Raises on bad http response
    :raises TypeError: Raises on invalid param types
    :rtype: Weather
    Example uses
    .. code-block:: python
        # DarkSky instantiation
        >>> darksky = pydarksky.DarkSky(api_key)
        # Pre-define values
        >>> darksky.latitude = -34.9285
        >>> darksky.longitude = 138.6005
        >>> weather = darksky.weather()
        # Pass values as params
        >>> weather = darksky.weather(latitude=-34.9285, longitude=138.6005)
        # Pass values from dict
        >>> kwargs = {"longitude": 138.6005, "latitude": -34.9285}
        >>> weather = darksky.weather(**kwargs)
    """
    # Fall back to previously stored coordinates when a param is omitted;
    # with neither a param nor a stored value, raise TypeError (as the
    # docstring promises). The old messages were garbled ("latitude must
    # be type '<class 'str'>' is None" for a float parameter).
    if latitude is None:
        if self.latitude is None:
            raise TypeError("latitude must be type 'float', not None")
    else:
        self.latitude = latitude
    if longitude is None:
        if self.longitude is None:
            raise TypeError("longitude must be type 'float', not None")
    else:
        self.longitude = longitude
    self._date = date

    url = self.url
    log.debug(url)
    # gzip keeps the payload small; 5s timeout avoids hanging callers.
    self._response = requests.get(url, headers={"Accept-Encoding": "gzip"}, timeout=5)
    self._response.raise_for_status()
    self._weather = Weather(self._response.text)
    return self._weather
:param float longitude: Locations longitude
:param datetime or str or int date: Date/time for historical weather data
:raises requests.exceptions.HTTPError: Raises on bad http response
:raises TypeError: Raises on invalid param types
:rtype: Weather
Example uses
.. code-block:: python
# DarkSky instantiation
>>> darksky = pydarksky.DarkSky(api_key)
# Pre-define values
>>> darksky.latitude = -34.9285
>>> darksky.longitude = 138.6005
>>> weather = darksky.weather()
# Pass values as params
>>> weather = darksky.weather(latitude=-34.9285, longitude=138.6005)
# Pass values from dict
>>> kwargs = {"longitude": 138.6005, "latitude": -34.9285}
>>> weather = darksky.weather(**kwargs) |
def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
               main=None, xtitle=None, xrange=None, ylabels=None, chain_spacing=0.05, vline=0):
    """ Forest plot (model summary plot)
    Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
    set of variables in a given model, or a specified set of nodes.
    :Arguments:
        trace_obj: NpTrace or MultiTrace object
            Trace(s) from an MCMC sample.
        vars: list
            List of variables to plot (defaults to None, which results in all
            variables plotted).
        alpha (optional): float
            Alpha value for (1-alpha)*100% credible intervals (defaults to 0.05).
        quartiles (optional): bool
            Flag for plotting the interquartile range, in addition to the
            (1-alpha)*100% intervals (defaults to True).
        rhat (optional): bool
            Flag for plotting Gelman-Rubin statistics. Requires 2 or more
            chains (defaults to True).
        main (optional): string
            Title for main plot. Passing False results in titles being
            suppressed; passing None (default) results in default titles.
        xtitle (optional): string
            Label for x-axis. Defaults to no label
        xrange (optional): list or tuple
            Range for x-axis. Defaults to matplotlib's best guess.
        ylabels (optional): list
            User-defined labels for each variable. If not provided, the node
            __name__ attributes are used.
        chain_spacing (optional): float
            Plot spacing between chains (defaults to 0.05).
        vline (optional): numeric
            Location of vertical reference line (defaults to 0).
    """
    # matplotlib.gridspec is required; degrade gracefully on old matplotlib.
    if not gridspec:
        print_(
            '\nYour installation of matplotlib is not recent enough to support summary_plot; this function is disabled until matplotlib is updated.')
        return
    # Quantiles to be calculated
    qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
    if quartiles:
        qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
    # Range for x-axis
    plotrange = None
    # Number of chains
    chains = None
    # Gridspec
    gs = None
    # Subplots
    interval_plot = None
    rhat_plot = None
    try:
        # First try MultiTrace type
        traces = trace_obj.traces
        if rhat and len(traces) > 1:
            from .diagnostics import gelman_rubin
            R = gelman_rubin(trace_obj)
            if vars is not None:
                R = {v: R[v] for v in vars}
        else:
            rhat = False
    except AttributeError:
        # Single NpTrace
        traces = [trace_obj]
        # Can't calculate Gelman-Rubin with a single trace
        rhat = False
    if vars is None:
        vars = traces[0].varnames
    # Empty list for y-axis labels
    labels = []
    chains = len(traces)
    if gs is None:
        # Initialize plot
        if rhat and chains > 1:
            gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
        else:
            gs = gridspec.GridSpec(1, 1)
        # Subplot for confidence intervals
        interval_plot = subplot(gs[0])
    for j, tr in enumerate(traces):
        # Get quantiles
        trace_quantiles = quantiles(tr, qlist)
        hpd_intervals = hpd(tr, alpha)
        # Counter for current variable
        var = 1
        for varname in vars:
            var_quantiles = trace_quantiles[varname]
            # NOTE(review): .values() is indexed and assigned below, which
            # requires the Python 2 list behaviour; on Python 3 dict_values
            # raises TypeError here. Confirm the supported Python version.
            quants = var_quantiles.values()
            var_hpd = hpd_intervals[varname].T
            # Substitute HPD interval for quantile
            quants[0] = var_hpd[0].T
            quants[-1] = var_hpd[1].T
            # Ensure x-axis contains range of current interval
            if plotrange:
                plotrange = [min(
                    plotrange[0],
                    np.min(quants)),
                    max(plotrange[1],
                        np.max(quants))]
            else:
                plotrange = [np.min(quants), np.max(quants)]
            # Number of elements in current variable
            value = tr[varname][0]
            k = np.size(value)
            # Append variable name(s) to list
            if not j:
                if k > 1:
                    names = var_str(varname, shape(value))
                    labels += names
                else:
                    labels.append(varname)
                    # labels.append('\n'.join(varname.split('_')))
            # Add spacing for each chain, if more than one
            e = [0] + [(chain_spacing * ((i + 2) / 2)) *
                       (-1) ** i for i in range(chains - 1)]
            # Deal with multivariate nodes
            if k > 1:
                for i, q in enumerate(np.transpose(quants).squeeze()):
                    # Y coordinate with jitter
                    y = -(var + i) + e[j]
                    if quartiles:
                        # Plot median
                        plot(q[2], y, 'bo', markersize=4)
                        # Plot quartile interval
                        errorbar(
                            x=(q[1],
                                q[3]),
                            y=(y,
                                y),
                            linewidth=2,
                            color="blue")
                    else:
                        # Plot median
                        plot(q[1], y, 'bo', markersize=4)
                    # Plot outer interval
                    errorbar(
                        x=(q[0],
                            q[-1]),
                        y=(y,
                            y),
                        linewidth=1,
                        color="blue")
            else:
                # Y coordinate with jitter
                y = -var + e[j]
                if quartiles:
                    # Plot median
                    plot(quants[2], y, 'bo', markersize=4)
                    # Plot quartile interval
                    errorbar(
                        x=(quants[1],
                            quants[3]),
                        y=(y,
                            y),
                        linewidth=2,
                        color="blue")
                else:
                    # Plot median
                    plot(quants[1], y, 'bo', markersize=4)
                # Plot outer interval
                errorbar(
                    x=(quants[0],
                        quants[-1]),
                    y=(y,
                        y),
                    linewidth=1,
                    color="blue")
            # Increment index
            var += k
    labels = ylabels or labels
    # Update margins
    left_margin = np.max([len(x) for x in labels]) * 0.015
    gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
    # Define range of y-axis
    ylim(-var + 0.5, -0.5)
    datarange = plotrange[1] - plotrange[0]
    xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
    # Add variable labels
    yticks([-(l + 1) for l in range(len(labels))], labels)
    # Add title
    if main is not False:
        plot_title = main or str(int((
            1 - alpha) * 100)) + "% Credible Intervals"
        title(plot_title)
    # Add x-axis label
    if xtitle is not None:
        xlabel(xtitle)
    # Constrain to specified range
    if xrange is not None:
        xlim(*xrange)
    # Remove ticklines on y-axes
    for ticks in interval_plot.yaxis.get_major_ticks():
        ticks.tick1On = False
        ticks.tick2On = False
    # NOTE(review): dict.iteritems() is Python 2 only.
    for loc, spine in interval_plot.spines.iteritems():
        if loc in ['bottom', 'top']:
            pass
            # spine.set_position(('outward',10)) # outward by 10 points
        elif loc in ['left', 'right']:
            spine.set_color('none')  # don't draw spine
    # Reference line
    axvline(vline, color='k', linestyle='--')
    # Genenerate Gelman-Rubin plot
    if rhat and chains > 1:
        # If there are multiple chains, calculate R-hat
        rhat_plot = subplot(gs[1])
        if main is not False:
            title("R-hat")
        # Set x range
        xlim(0.9, 2.1)
        # X axis labels
        xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
        yticks([-(l + 1) for l in range(len(labels))], "")
        i = 1
        for varname in vars:
            value = traces[0][varname][0]
            k = np.size(value)
            # R-hat values above 2 are clipped to the "2+" tick.
            if k > 1:
                plot([min(r, 2) for r in R[varname]], [-(j + i)
                                                       for j in range(k)], 'bo', markersize=4)
            else:
                plot(min(R[varname], 2), -i, 'bo', markersize=4)
            i += k
        # Define range of y-axis
        ylim(-i + 0.5, -0.5)
        # Remove ticklines on y-axes
        for ticks in rhat_plot.yaxis.get_major_ticks():
            ticks.tick1On = False
            ticks.tick2On = False
        # NOTE(review): dict.iteritems() is Python 2 only.
        for loc, spine in rhat_plot.spines.iteritems():
            if loc in ['bottom', 'top']:
                pass
                # spine.set_position(('outward',10)) # outward by 10 points
            elif loc in ['left', 'right']:
                spine.set_color('none')  # don't draw spine
    return gs
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
vars: list
List of variables to plot (defaults to None, which results in all
variables plotted).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to 0.05).
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
suppressed; passing None (default) results in default titles.
xtitle (optional): string
Label for x-axis. Defaults to no label
xrange (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
ylabels (optional): list
User-defined labels for each variable. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline (optional): numeric
Location of vertical reference line (defaults to 0). |
def bfs_depth(self, U):
    '''
    Returns the maximum distance between any vertex and U in the connected
    component containing U (the eccentricity of U).

    Fix: the original popped from the END of the list (LIFO), performing a
    depth-first traversal; recorded depths were then discovery-path lengths
    rather than shortest-path distances, over-counting on graphs with
    cycles, and vertices could be enqueued and expanded multiple times.
    This version is a true FIFO breadth-first search that marks vertices
    as visited when they are enqueued.

    :param U: start vertex (must be a key of ``self.edges``)
    :return: maximum BFS distance from U (0 for an isolated vertex)
    '''
    from collections import deque
    bfs_queue = deque([(U, 0)])
    visited = {U}
    max_depth = 0
    while bfs_queue:
        vertex, depth = bfs_queue.popleft()
        if depth > max_depth:
            max_depth = depth
        for neighbour in self.edges[vertex]:
            if neighbour not in visited:
                visited.add(neighbour)
                bfs_queue.append((neighbour, depth + 1))
    return max_depth
component containing U
:param U:
:return: |
def set_topic_config(self, topic, value, kafka_version=(0, 10, )):
    """Write a topic-level config override and record a config-change event.

    :topic : topic whose configuration needs to be changed
    :value : config value with which the topic needs to be
             updated with, of the form key=value.
             Example 'cleanup.policy=compact'
    :kafka_version :tuple kafka version the brokers are running on.
             Defaults to (0, 10, x). Kafka version 9 and kafka 10
             support this feature.
    """
    serialized = dump_json(value)
    try:
        # Write the new config value for the topic.
        result = self.set(
            "/config/topics/{topic}".format(topic=topic),
            serialized
        )
        # Publish a change notification so brokers pick the update up.
        minor = kafka_version[1]
        # Notifications only exist for kafka 9 and kafka 10.
        assert minor in (9, 10), "Feature supported with kafka 9 and kafka 10"
        if minor == 9:
            # https://github.com/apache/kafka/blob/0.9.0.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L334
            notification = {
                "version": 1,
                "entity_type": "topics",
                "entity_name": topic
            }
        else:  # kafka 10
            # https://github.com/apache/kafka/blob/0.10.2.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L574
            notification = {
                "version": 2,
                "entity_path": "topics/" + topic,
            }
        self.create(
            '/config/changes/config_change_',
            dump_json(notification),
            sequence=True
        )
    except NoNodeError as e:
        _log.error(
            "topic {topic} not found.".format(topic=topic)
        )
        raise e
    return result
:topic : topic whose configuration needs to be changed
:value : config value with which the topic needs to be
updated with. This would be of the form key=value.
Example 'cleanup.policy=compact'
:kafka_version :tuple kafka version the brokers are running on.
Defaults to (0, 10, x). Kafka version 9 and kafka 10
support this feature. |
def restrict_args(func, *args, **kwargs):
    '''
    Restricts the positional arguments passed to ``func`` to the number of
    parameters it actually declares (unless it accepts ``*args``, in which
    case all positional arguments are forwarded).

    restrict_args(lambda a: a, 1, 2)
    # => 1

    :param func: callable to invoke
    :return: whatever ``func`` returns

    Fix: ``inspect.getargspec`` was removed in Python 3.11;
    ``inspect.getfullargspec`` exposes the same ``args``/``varargs`` fields.
    '''
    from inspect import getfullargspec
    spec = getfullargspec(func)
    if not spec.varargs:
        args = args[0:len(spec.args)]
    return func(*args, **kwargs)
restrict_args(lambda a: a, 1, 2)
# => 1 |
def paginate(parser, token, paginator_class=None):
    """Paginate objects.
    Usage:
    .. code-block:: html+django
        {% paginate entries %}
    After this call, the *entries* variable in the template context is replaced
    by only the entries of the current page.
    You can also keep your *entries* original variable (usually a queryset)
    and add to the context another name that refers to entries of the current
    page, e.g.:
    .. code-block:: html+django
        {% paginate entries as page_entries %}
    The *as* argument is also useful when a nested context variable is provided
    as queryset. In this case, and only in this case, the resulting variable
    name is mandatory, e.g.:
    .. code-block:: html+django
        {% paginate entries.all as entries %}
    The number of paginated entries is taken from settings, but you can
    override the default locally, e.g.:
    .. code-block:: html+django
        {% paginate 20 entries %}
    Of course you can mix it all:
    .. code-block:: html+django
        {% paginate 20 entries as paginated_entries %}
    By default, the first page is displayed the first time you load the page,
    but you can change this, e.g.:
    .. code-block:: html+django
        {% paginate entries starting from page 3 %}
    When changing the default page, it is also possible to reference the last
    page (or the second last page, and so on) by using negative indexes, e.g:
    .. code-block:: html+django
        {% paginate entries starting from page -1 %}
    This can be also achieved using a template variable that was passed to the
    context, e.g.:
    .. code-block:: html+django
        {% paginate entries starting from page page_number %}
    If the passed page number does not exist, the first page is displayed.
    If you have multiple paginations in the same page, you can change the
    querydict key for the single pagination, e.g.:
    .. code-block:: html+django
        {% paginate entries using article_page %}
    In this case *article_page* is intended to be a context variable, but you
    can hardcode the key using quotes, e.g.:
    .. code-block:: html+django
        {% paginate entries using 'articles_at_page' %}
    Again, you can mix it all (the order of arguments is important):
    .. code-block:: html+django
        {% paginate 20 entries
            starting from page 3 using page_key as paginated_entries %}
    Additionally you can pass a path to be used for the pagination:
    .. code-block:: html+django
        {% paginate 20 entries
            using page_key with pagination_url as paginated_entries %}
    This way you can easily create views acting as API endpoints, and point
    your Ajax calls to that API. In this case *pagination_url* is considered a
    context variable, but it is also possible to hardcode the URL, e.g.:
    .. code-block:: html+django
        {% paginate 20 entries with "/mypage/" %}
    If you want the first page to contain a different number of items than
    subsequent pages, you can separate the two values with a comma, e.g. if
    you want 3 items on the first page and 10 on other pages:
    .. code-block:: html+django
        {% paginate 3,10 entries %}
    You must use this tag before calling the {% show_more %} one.
    """
    # Validate arguments.
    try:
        tag_name, tag_args = token.contents.split(None, 1)
    except ValueError:
        msg = '%r tag requires arguments' % token.contents.split()[0]
        raise template.TemplateSyntaxError(msg)
    # Use a regexp to catch args.
    match = PAGINATE_EXPRESSION.match(tag_args)
    if match is None:
        msg = 'Invalid arguments for %r tag' % tag_name
        raise template.TemplateSyntaxError(msg)
    # Retrieve objects.
    kwargs = match.groupdict()
    objects = kwargs.pop('objects')
    # The variable name must be present if a nested context variable is passed.
    if '.' in objects and kwargs['var_name'] is None:
        # Fix: the error message previously misspelled "argument".
        msg = (
            '%(tag)r tag requires a variable name `as` argument if the '
            'queryset is provided as a nested context variable (%(objects)s). '
            'You must either pass a direct queryset (e.g. taking advantage '
            'of the `with` template tag) or provide a new variable name to '
            'store the resulting queryset (e.g. `%(tag)s %(objects)s as '
            'objects`).'
        ) % {'tag': tag_name, 'objects': objects}
        raise template.TemplateSyntaxError(msg)
    # Call the node.
    return PaginateNode(paginator_class, objects, **kwargs)
Usage:
.. code-block:: html+django
{% paginate entries %}
After this call, the *entries* variable in the template context is replaced
by only the entries of the current page.
You can also keep your *entries* original variable (usually a queryset)
and add to the context another name that refers to entries of the current
page, e.g.:
.. code-block:: html+django
{% paginate entries as page_entries %}
The *as* argument is also useful when a nested context variable is provided
as queryset. In this case, and only in this case, the resulting variable
name is mandatory, e.g.:
.. code-block:: html+django
{% paginate entries.all as entries %}
The number of paginated entries is taken from settings, but you can
override the default locally, e.g.:
.. code-block:: html+django
{% paginate 20 entries %}
Of course you can mix it all:
.. code-block:: html+django
{% paginate 20 entries as paginated_entries %}
By default, the first page is displayed the first time you load the page,
but you can change this, e.g.:
.. code-block:: html+django
{% paginate entries starting from page 3 %}
When changing the default page, it is also possible to reference the last
page (or the second last page, and so on) by using negative indexes, e.g:
.. code-block:: html+django
{% paginate entries starting from page -1 %}
This can be also achieved using a template variable that was passed to the
context, e.g.:
.. code-block:: html+django
{% paginate entries starting from page page_number %}
If the passed page number does not exist, the first page is displayed.
If you have multiple paginations in the same page, you can change the
querydict key for the single pagination, e.g.:
.. code-block:: html+django
{% paginate entries using article_page %}
In this case *article_page* is intended to be a context variable, but you
can hardcode the key using quotes, e.g.:
.. code-block:: html+django
{% paginate entries using 'articles_at_page' %}
Again, you can mix it all (the order of arguments is important):
.. code-block:: html+django
{% paginate 20 entries
starting from page 3 using page_key as paginated_entries %}
Additionally you can pass a path to be used for the pagination:
.. code-block:: html+django
{% paginate 20 entries
using page_key with pagination_url as paginated_entries %}
This way you can easily create views acting as API endpoints, and point
your Ajax calls to that API. In this case *pagination_url* is considered a
context variable, but it is also possible to hardcode the URL, e.g.:
.. code-block:: html+django
{% paginate 20 entries with "/mypage/" %}
If you want the first page to contain a different number of items than
subsequent pages, you can separate the two values with a comma, e.g. if
you want 3 items on the first page and 10 on other pages:
.. code-block:: html+django
{% paginate 3,10 entries %}
You must use this tag before calling the {% show_more %} one. |
def add_locals(self, locals):
    '''
    Return this proxy unchanged when no locals are given; otherwise build a
    copy of this variable proxy that additionally carries the given locals.
    '''
    if locals is None:
        return self
    return _jinja2_vars(
        self.basedir, self.vars, self.globals, locals, *self.extras)
locals in addition to what is already in this variable proxy. |
def color(self, *args):
    '''
    :param args: color in a supported format.
    :return: Color object containing the color.
    '''
    # Delegate to the instance's Color factory, carrying over the
    # configured color mode and range.
    factory = self.Color
    return factory(mode=self.color_mode, color_range=self.color_range, *args)
def _register_simple(self, endpoint, scheme, f):
    """Register a simple endpoint with this TChannel.

    :param endpoint:
        Name of the endpoint being registered.
    :param scheme:
        Name of the arg scheme under which the endpoint will be
        registered.
    :param f:
        Callable handler for the endpoint.
    """
    assert scheme in DEFAULT_NAMES, ("Unsupported arg scheme %s" % scheme)
    # JSON endpoints get JSON serializers; every other supported scheme
    # falls back to raw (byte passthrough) serialization.
    serializer_cls = JsonSerializer if scheme == JSON else RawSerializer
    self._handler.register(endpoint, f, serializer_cls(), serializer_cls())
    return f
:param endpoint:
Name of the endpoint being registered.
:param scheme:
Name of the arg scheme under which the endpoint will be
registered.
:param f:
Callable handler for the endpoint. |
def kafka_kip(enrich):
    """Kafka Improvement Proposals (KIP) process study.

    Runs three successive enrichment passes over the items fetched from
    `enrich` and bulk-uploads the result after each pass:
    1. tag each message with basic KIP fields (number, type, vote),
    2. compute per-KIP timing and status fields,
    3. propagate the final status of each KIP to all of its messages.

    NOTE(review): MAX_LINES_FOR_VOTE, logger, parser (presumably
    dateutil.parser) and get_time_diff_days are module-level names not
    visible in this chunk.
    """
    def extract_vote_and_binding(body):
        """ Extracts the vote and binding for a KIP process included in message body """
        vote = 0
        binding = 0  # by default the votes are binding for +1
        nlines = 0
        for line in body.split("\n"):
            if nlines > MAX_LINES_FOR_VOTE:
                # The vote must be in the first MAX_LINES_VOTE
                break
            if line.startswith(">"):
                # This line is from a previous email
                continue
            elif "+1" in line and "-1" in line:
                # Report summary probably
                continue
            elif "to -1" in line or "is -1" in line or "= -1" in line or "-1 or" in line:
                continue
            elif line.startswith("+1") or " +1 " in line or line.endswith("+1") \
                    or " +1." in line or " +1," in line:
                vote = 1
                binding = 1  # by default the votes are binding for +1
                if 'non-binding' in line.lower():
                    binding = 0
                elif 'binding' in line.lower():
                    binding = 1
                break
            elif line.startswith("-1") or line.endswith(" -1") or " -1 " in line \
                    or " -1." in line or " -1," in line:
                vote = -1
                if 'non-binding' in line.lower():
                    binding = 0
                elif 'binding' in line.lower():
                    binding = 1
                break
            nlines += 1
        return (vote, binding)

    def extract_kip(subject):
        """ Extracts a KIP number from an email subject """
        kip = None
        if not subject:
            return kip
        if 'KIP' not in subject:
            return kip
        kip_tokens = subject.split('KIP')
        if len(kip_tokens) > 2:
            # [KIP-DISCUSSION] KIP-7 Security
            # Several 'KIP' markers: recurse on each token and keep the
            # first number found.
            for token in kip_tokens:
                kip = extract_kip("KIP" + token)
                if kip:
                    break
            # logger.debug("Several KIPs in %s. Found: %i", subject, kip)
            return kip
        str_with_kip = kip_tokens[1]
        if not str_with_kip:
            # Sample use case subject: Create a space template for KIP
            return kip
        # Each try/except below handles one observed subject format; the
        # first one whose token parses as an int wins.
        if str_with_kip[0] == '-':
            try:
                # KIP-120: Control
                str_kip = str_with_kip[1:].split(":")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-8 Add
                str_kip = str_with_kip[1:].split(" ")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-11- Authorization
                str_kip = str_with_kip[1:].split("-")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # Bound fetch response size (KIP-74)
                str_kip = str_with_kip[1:].split(")")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-31&
                str_kip = str_with_kip[1:].split("&")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # KIP-31/
                str_kip = str_with_kip[1:].split("/")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # Re: Copycat (KIP-26. PR-99) - plan on moving forward
                str_kip = str_with_kip[1:].split(".")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
        elif str_with_kip[0] == ' ':
            try:
                # KIP 20 Enable
                str_kip = str_with_kip[1:].split(" ")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # Re: [DISCUSS] KIP 88: DescribeGroups Protocol Update
                str_kip = str_with_kip[1:].split(":")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
            try:
                # [jira] [Updated] (KAFKA-5092) KIP 141- ProducerRecordBuilder
                str_kip = str_with_kip[1:].split("-")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
        elif str_with_kip[0] == ':':
            try:
                # Re: [VOTE] KIP:71 Enable log compaction and deletion to co-exist
                str_kip = str_with_kip[1:].split(" ")[0]
                kip = int(str_kip)
                return kip
            except ValueError:
                pass
        if not kip:
            # logger.debug("Can not extract KIP from %s", subject)
            pass
        return kip

    def lazy_result(votes):
        """ Compute the result of a vote using lazy consensus,
        which requires 3 binding +1 votes and no binding vetoes.

        Returns 1 when the vote passes, -1 otherwise.
        """
        yes = 0
        yes_binding = 0
        veto = 0
        veto_binding = 0
        result = -1
        for (vote, binding) in votes:
            if vote == 1:
                if binding:
                    yes_binding += 1
                else:
                    yes += 1
            if vote == -1:
                if binding:
                    veto_binding += 1
                else:
                    veto += 1
        if veto_binding == 0 and yes_binding >= 3:
            result = 1
        return result

    def add_kip_final_status_field(enrich):
        """ Add kip final status field (generator over enriched items). """
        total = 0
        for eitem in enrich.fetch():
            if "kip" not in eitem:
                # It is not a KIP message
                continue
            if eitem['kip'] in enrich.kips_final_status:
                eitem.update({"kip_final_status":
                              enrich.kips_final_status[eitem['kip']]})
            else:
                logger.warning("No final status for kip: %i", eitem['kip'])
                eitem.update({"kip_final_status": None})
            yield eitem
            total += 1
        logger.info("Total eitems with kafka final status kip field %i", total)

    def add_kip_time_status_fields(enrich):
        """ Add kip fields with final status and times (generator).

        Also fills enrich.kips_final_status as a side effect, using the
        status of the last discuss/vote message of each KIP.
        """
        total = 0
        max_inactive_days = 90  # days
        enrich.kips_final_status = {}  # final status for each kip
        for eitem in enrich.fetch():
            # kip_status: adopted (closed), discussion (open), voting (open),
            # inactive (open), discarded (closed)
            # kip_start_end: discuss_start, discuss_end, voting_start, voting_end
            kip_fields = {
                "kip_status": None,
                "kip_discuss_time_days": None,
                "kip_discuss_inactive_days": None,
                "kip_voting_time_days": None,
                "kip_voting_inactive_days": None,
                "kip_is_first_discuss": 0,
                "kip_is_first_vote": 0,
                "kip_is_last_discuss": 0,
                "kip_is_last_vote": 0,
                "kip_result": None,
                "kip_start_end": None
            }
            if "kip" not in eitem:
                # It is not a KIP message
                continue
            kip = eitem["kip"]
            kip_date = parser.parse(eitem["email_date"])
            if eitem['kip_is_discuss']:
                kip_fields["kip_discuss_time_days"] = \
                    get_time_diff_days(enrich.kips_dates[kip]['kip_min_discuss'],
                                       enrich.kips_dates[kip]['kip_max_discuss'])
                # Detect first and last discuss messages
                if kip_date == enrich.kips_dates[kip]['kip_min_discuss']:
                    kip_fields['kip_is_first_discuss'] = 1
                    kip_fields['kip_start_end'] = 'discuss_start'
                elif kip_date == enrich.kips_dates[kip]['kip_max_discuss']:
                    kip_fields['kip_is_last_discuss'] = 1
                    kip_fields['kip_start_end'] = 'discuss_end'
                # Detect discussion status
                if "kip_min_vote" not in enrich.kips_dates[kip]:
                    kip_fields['kip_status'] = 'discussion'
                max_discuss_date = enrich.kips_dates[kip]['kip_max_discuss']
                kip_fields['kip_discuss_inactive_days'] = \
                    get_time_diff_days(max_discuss_date.replace(tzinfo=None),
                                       datetime.utcnow())
            if eitem['kip_is_vote']:
                kip_fields["kip_voting_time_days"] = \
                    get_time_diff_days(enrich.kips_dates[kip]['kip_min_vote'],
                                       enrich.kips_dates[kip]['kip_max_vote'])
                # Detect first and last discuss messages
                if kip_date == enrich.kips_dates[kip]['kip_min_vote']:
                    kip_fields['kip_is_first_vote'] = 1
                    kip_fields['kip_start_end'] = 'voting_start'
                elif kip_date == enrich.kips_dates[kip]['kip_max_vote']:
                    kip_fields['kip_is_last_vote'] = 1
                    kip_fields['kip_start_end'] = 'voting_end'
                # Detect discussion status
                kip_fields['kip_status'] = 'voting'
                max_vote_date = enrich.kips_dates[kip]['kip_max_vote']
                kip_fields['kip_voting_inactive_days'] = \
                    get_time_diff_days(max_vote_date.replace(tzinfo=None),
                                       datetime.utcnow())
                # Now check if there is a result from enrich.kips_scores
                kip_fields['kip_result'] = lazy_result(enrich.kips_scores[kip])
                if kip_fields['kip_result'] == 1:
                    kip_fields['kip_status'] = 'adopted'
                elif kip_fields['kip_result'] == -1:
                    kip_fields['kip_status'] = 'discarded'
            # And now change the status inactive
            if kip_fields['kip_status'] not in ['adopted', 'discarded']:
                inactive_days = kip_fields['kip_discuss_inactive_days']
                if inactive_days and inactive_days > max_inactive_days:
                    kip_fields['kip_status'] = 'inactive'
                inactive_days = kip_fields['kip_voting_inactive_days']
                if inactive_days and inactive_days > max_inactive_days:
                    kip_fields['kip_status'] = 'inactive'
            # The final status is in the kip_is_last_discuss or kip_is_last_vote
            # It will be filled in the next enrichment round
            eitem.update(kip_fields)
            if eitem['kip'] not in enrich.kips_final_status:
                enrich.kips_final_status[kip] = None
            if eitem['kip_is_last_discuss'] and not enrich.kips_final_status[kip]:
                enrich.kips_final_status[kip] = kip_fields['kip_status']
            if eitem['kip_is_last_vote']:
                # A vote result always wins over a discussion status.
                enrich.kips_final_status[kip] = kip_fields['kip_status']
            yield eitem
            total += 1
        logger.info("Total eitems with kafka extra kip fields %i", total)

    def add_kip_fields(enrich):
        """ Add extra fields needed for kip analysis (generator).

        Fills enrich.kips_dates (min/max discuss and vote dates per KIP)
        and enrich.kips_scores (list of (vote, binding) per KIP) as side
        effects for the later passes.
        """
        total = 0
        enrich.kips_dates = {
            0: {
                "kip_min_discuss": None,
                "kip_max_discuss": None,
                "kip_min_vote": None,
                "kip_max_vote": None,
            }
        }
        enrich.kips_scores = {}
        # First iteration
        for eitem in enrich.fetch():
            kip_fields = {
                "kip_is_vote": 0,
                "kip_is_discuss": 0,
                "kip_vote": 0,
                "kip_binding": 0,
                "kip": 0,
                "kip_type": "general"
            }
            kip = extract_kip(eitem['Subject'])
            if not kip:
                # It is not a KIP message
                continue
            if kip not in enrich.kips_dates:
                enrich.kips_dates[kip] = {}
            if kip not in enrich.kips_scores:
                enrich.kips_scores[kip] = []
            kip_date = parser.parse(eitem["email_date"])
            # Analyze the subject to fill the kip fields
            if '[discuss]' in eitem['Subject'].lower() or \
                    '[kip-discussion]' in eitem['Subject'].lower() or \
                    '[discussion]' in eitem['Subject'].lower():
                kip_fields['kip_is_discuss'] = 1
                kip_fields['kip_type'] = "discuss"
                kip_fields['kip'] = kip
                # Update kip discuss dates
                if "kip_min_discuss" not in enrich.kips_dates[kip]:
                    enrich.kips_dates[kip].update({
                        "kip_min_discuss": kip_date,
                        "kip_max_discuss": kip_date
                    })
                else:
                    if enrich.kips_dates[kip]["kip_min_discuss"] >= kip_date:
                        enrich.kips_dates[kip]["kip_min_discuss"] = kip_date
                    if enrich.kips_dates[kip]["kip_max_discuss"] <= kip_date:
                        enrich.kips_dates[kip]["kip_max_discuss"] = kip_date
            if '[vote]' in eitem['Subject'].lower():
                kip_fields['kip_is_vote'] = 1
                kip_fields['kip_type'] = "vote"
                kip_fields['kip'] = kip
                if 'body_extract' in eitem:
                    (vote, binding) = extract_vote_and_binding(eitem['body_extract'])
                    enrich.kips_scores[kip] += [(vote, binding)]
                    kip_fields['kip_vote'] = vote
                    kip_fields['kip_binding'] = binding
                else:
                    logger.debug("Message %s without body", eitem['Subject'])
                # Update kip discuss dates
                if "kip_min_vote" not in enrich.kips_dates[kip]:
                    enrich.kips_dates[kip].update({
                        "kip_min_vote": kip_date,
                        "kip_max_vote": kip_date
                    })
                else:
                    if enrich.kips_dates[kip]["kip_min_vote"] >= kip_date:
                        enrich.kips_dates[kip]["kip_min_vote"] = kip_date
                    if enrich.kips_dates[kip]["kip_max_vote"] <= kip_date:
                        enrich.kips_dates[kip]["kip_max_vote"] = kip_date
            eitem.update(kip_fields)
            yield eitem
            total += 1
        logger.info("Total eitems with kafka kip fields %i", total)

    logger.debug("Doing kafka_kip study from %s", enrich.elastic.anonymize_url(enrich.elastic.index_url))
    # First iteration with the basic fields
    eitems = add_kip_fields(enrich)
    enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())
    # Second iteration with the final time and status fields
    eitems = add_kip_time_status_fields(enrich)
    enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())
    # Third iteration to compute the end status field for all KIPs
    eitems = add_kip_final_status_field(enrich)
    enrich.elastic.bulk_upload(eitems, enrich.get_field_unique_id())
def get_outcome_for_state_id(self, state_id):
    """ Returns the final outcome of the child state specified by the state_id.

    :param state_id: The id of the state to get the final outcome for.
    :return: the outcome, or None if no entry exists for state_id.
    """
    # final_outcomes_dict maps state_id -> (name, outcome); the original
    # linear scan over .items() was just a dictionary key lookup.
    try:
        return self.final_outcomes_dict[state_id][1]
    except KeyError:
        return None
:param state_id: The id of the state to get the final outcome for.
:return: |
def _get_torrent_category(self, tag, result=None):
"""Given a tag containing torrent details try to find category
of torrent. In search pages the category is found in links of
the form <a href='/tv/'>TV</a> with TV replaced with movies, books
etc. For the home page I will use the result number to
decide the category"""
hrefs = ["/movies/", "/tv/", "/music/", "/games/", "/applications/", "/anime/",
"/books/", "/xxx/"]
category = None
if not result is None: # if result: 0 returns false.
# Searching home page, get category from result number
category = hrefs[result / 10].strip("/")
return category
for item in hrefs:
if tag.select("a[href=" + item + "]"):
category = item.strip("/")
return category | Given a tag containing torrent details try to find category
of torrent. In search pages the category is found in links of
the form <a href='/tv/'>TV</a> with TV replaced with movies, books
etc. For the home page I will use the result number to
decide the category |
def _is_bst(root, min_value=float('-inf'), max_value=float('inf')):
"""Check if the binary tree is a BST (binary search tree).
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:param min_value: Minimum node value seen.
:type min_value: int | float
:param max_value: Maximum node value seen.
:type max_value: int | float
:return: True if the binary tree is a BST, False otherwise.
:rtype: bool
"""
if root is None:
return True
return (
min_value < root.value < max_value and
_is_bst(root.left, min_value, root.value) and
_is_bst(root.right, root.value, max_value)
) | Check if the binary tree is a BST (binary search tree).
:param root: Root node of the binary tree.
:type root: binarytree.Node | None
:param min_value: Minimum node value seen.
:type min_value: int | float
:param max_value: Maximum node value seen.
:type max_value: int | float
:return: True if the binary tree is a BST, False otherwise.
:rtype: bool |
def managed(name, users=None, defaults=None):
    '''
    Manages the configuration of the users on the device, as specified in the
    state SLS file. Users not defined in that file will be removed whilst
    users not configured on the device will be added.

    name
        The ID of the state block.

    users
        Mapping of username to user properties (``level``, ``password``,
        ``sshkeys``).

    defaults
        Default properties applied to every user that does not override them.

    SLS Example:

    .. code-block:: yaml

        netusers_example:
          netusers.managed:
            - users:
                admin:
                  level: 15
                  password: $1$knmhgPPv$g8745biu4rb.Zf.IT.F/U1
                  sshkeys: []
                restricted:
                  level: 1
                  password: $1$j34j5k4b$4d5SVjTiz1l.Zf.IT.F/K7

    CLI Example:

        salt 'edge01.kix01' state.sls router.users

    Returns the usual Salt state dictionary (``name``, ``result``,
    ``comment``) with a ``changes`` dict containing the ``added``,
    ``updated`` and ``removed`` users.
    '''
    result = False
    comment = ''
    changes = {}
    ret = {
        'name': name,
        'changes': changes,
        'result': result,
        'comment': comment
    }
    # Normalise ordered dicts coming from the SLS parser and expand the
    # per-user defaults before validating.
    users = _ordered_dict_to_dict(users)
    defaults = _ordered_dict_to_dict(defaults)
    expected_users = _expand_users(users, defaults)
    valid, message = _check_users(expected_users)
    if not valid:  # check and clean
        ret['comment'] = 'Please provide a valid configuration: {error}'.format(error=message)
        return ret
    # ----- Retrieve existing users configuration and determine differences ------------------------------------------->
    users_output = _retrieve_users()
    if not users_output.get('result'):
        ret['comment'] = 'Cannot retrieve users from the device: {reason}'.format(
            reason=users_output.get('comment')
        )
        return ret
    configured_users = users_output.get('out', {})
    if configured_users == expected_users:
        # Nothing to do: device already matches the desired state.
        ret.update({
            'comment': 'Users already configured as needed.',
            'result': True
        })
        return ret
    diff = _compute_diff(configured_users, expected_users)
    users_to_add = diff.get('add', {})
    users_to_update = diff.get('update', {})
    users_to_remove = diff.get('remove', {})
    changes = {
        'added': users_to_add,
        'updated': users_to_update,
        'removed': users_to_remove
    }
    ret.update({
        'changes': changes
    })
    if __opts__['test'] is True:
        # Dry run: report the computed changes without applying them.
        ret.update({
            'result': None,
            'comment': 'Testing mode: configuration was not changed!'
        })
        return ret
    # <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
    # ----- Call _set_users and _delete_users as needed --------------------------------------------------------------->
    expected_config_change = False
    successfully_changed = True
    if users_to_add:
        _set = _set_users(users_to_add)
        if _set.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += 'Cannot configure new users: {reason}'.format(
                reason=_set.get('comment')
            )
    if users_to_update:
        _update = _update_users(users_to_update)
        if _update.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += 'Cannot update the users configuration: {reason}'.format(
                reason=_update.get('comment')
            )
    if users_to_remove:
        _delete = _delete_users(users_to_remove)
        if _delete.get('result'):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += 'Cannot remove users: {reason}'.format(
                reason=_delete.get('comment')
            )
    # <---- Call _set_users and _delete_users as needed ----------------------------------------------------------------
    # ----- Try to commit changes ------------------------------------------------------------------------------------->
    if expected_config_change and successfully_changed:
        config_result, config_comment = __salt__['net.config_control']()
        result = config_result
        comment += config_comment
    # <---- Try to commit changes --------------------------------------------------------------------------------------
    if expected_config_change and result and not comment:
        comment = 'Configuration updated!'
    ret.update({
        'result': result,
        'comment': comment
    })
    return ret
file will be removed whilst users not configured on the device will be added.
SLS Example:
.. code-block:: yaml
netusers_example:
netusers.managed:
- users:
admin:
level: 15
password: $1$knmhgPPv$g8745biu4rb.Zf.IT.F/U1
sshkeys: []
restricted:
level: 1
password: $1$j34j5k4b$4d5SVjTiz1l.Zf.IT.F/K7
martin:
level: 15
password: ''
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg
jonathan:
level: 15
password: ''
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N
CLI Example:
salt 'edge01.kix01' state.sls router.users
Output example (raw python - can be reused in other modules):
.. code-block:: python
{
'netusers_|-netusers_example_|-netusers_example_|-managed': {
'comment': 'Configuration updated!',
'name': 'netusers_example',
'start_time': '10:57:08.678811',
'__id__': 'netusers_example',
'duration': 1620.982,
'__run_num__': 0,
'changes': {
'updated': {
'admin': {
'level': 15
},
'restricted': {
'level': 1
},
'martin': {
'sshkeys': [
'ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4vwWHh0w
JPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZKtCjO8LhbWCa+
X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87Oz1nKsKuNzm2csoUQlJ
trmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwxM570s35Of/vV0zoOccj753sXn
pvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t+wAAAIBURwSPZVElXe+9a43sF6M4ysT
7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0
bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v+zvMmv8KvQgHg'
]
}
},
'added': {
'jonathan': {
'password': '',
'sshkeys': [
'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgGR9zPkHG
ZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qaoqwpLB15GwLfEX
Bx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006xeHh7rv7HtXF6zH3WId
Uhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9xZBq6DBb+sESMS4s7nFcsruMo
edb+BAc3aww0naeWpogjSt+We7y2N'
],
'level': 15
}
},
'removed': {
}
},
'result': True
}
}
CLI Output:
.. code-block:: bash
edge01.kix01:
----------
ID: netusers_example
Function: netusers.managed
Result: True
Comment: Configuration updated!
Started: 11:03:31.957725
Duration: 1220.435 ms
Changes:
----------
added:
----------
jonathan:
----------
level:
15
password:
sshkeys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcgxE6HZF/xjFtIt0thEDKPjFJxW9BpZtTVstYbDgG
R9zPkHGZJT/j345jk345jk453jk43545j35nl3kln34n5kl4ghv3/JzWt/0Js5KZp/51KRNCs9O4t07qao
qwpLB15GwLfEXBx9dW26zc4O+hi6754trxcfghvjbo98765drt/LYIEg0KSQPWyJEK1g31gacbxN7Ab006
xeHh7rv7HtXF6zH3WIdUhq9rtdUag6kYnv6qvjG7sbCyHGYu5vZB7GytnNuVNbZuI+RdFvmHSnErV9HCu9
xZBq6DBb+sESMS4s7nFcsruMoedb+BAc3aww0naeWpogjSt+We7y2N
removed:
----------
updated:
----------
martin:
----------
sshkeys:
- ssh-dss AAAAB3NzaC1kc3MAAACBAK9dP3KariMlM/JmFW9rTSm5cXs4nR0+o6fTHP9o+bOLXMBTP8R4
vwWHh0wJPjQmJYafAqZTnlgi0srGjyifFwPtODppDWLCgLe2M4LXnu3OMqknr54w344zPHP3iFwWxHrBrZ
KtCjO8LhbWCa+X528+i87t6r5e4ersdfxgchvjbknlio87t6r5drcfhgjhbknio8976tycv7t86ftyiu87
Oz1nKsKuNzm2csoUQlJtrmRfpjsOPNookmOz5wG0YxhwDmKeo6fWK+ATk1OiP+QT39fn4G77j8o+e4WAwx
M570s35Of/vV0zoOccj753sXnpvJenvwpM2H6o3a9ALvehAJKWodAgZT7X8+iu786r5drtycghvjbiu78t
+wAAAIBURwSPZVElXe+9a43sF6M4ysT7Xv+6wTsa8q86E3+RYyu8O2ObI2kwNLC3/HTgFniE/YqRG+WJac
81/VHWQNP822gns8RVrWKjqBktmQoEm7z5yy0bkjui78675dytcghvjkoi9y7t867ftcuvhbuu9t78gy/v
+zvMmv8KvQgHg
admin:
----------
level:
15
restricted:
----------
level:
1
Summary for edge01.kix01
------------
Succeeded: 1 (changed=1)
Failed: 0
------------
Total states run: 1
Total run time: 1.220 s |
def main():
    """
    Parse command line arguments and then run the test suite.

    Exits the process with status 1 when `call_main` raises ValueError.
    """
    parser = argparse.ArgumentParser(description='A distributed test framework')
    parser.add_argument('testfile',
                        help='The file that is used to determine the test suite run')
    parser.add_argument('--test-only',
                        nargs='*',
                        dest='test_list',
                        help='run only the named tests to help debug broken tests')
    parser.add_argument('--machine-list',
                        nargs='*',
                        dest='machine_list',
                        help='''mapping of logical host names to physical names allowing the same
                        test suite to run on different hardware, each argument is a pair
                        of logical name and physical name separated by a =''')
    parser.add_argument('--config-overrides',
                        nargs='*',
                        dest='config_overrides',
                        help='''config overrides at execution time, each argument is a config with
                        its value separated by a =. This has the highest priority of all
                        configs''')
    parser.add_argument('-d', '--output-dir',
                        dest='output_dir',
                        help='''Directory to write output files and logs. Defaults to the current
                        directory.''')
    parser.add_argument("--log-level", dest="log_level", help="Log level (default INFO)", default="INFO")
    parser.add_argument("--console-log-level", dest="console_level", help="Console Log level (default ERROR)",
                        default="ERROR")
    parser.add_argument("--nopassword", action='store_true', dest="nopassword", help="Disable password prompt")
    parser.add_argument("--user", dest="user", help="user to run the test as (defaults to current user)")
    args = parser.parse_args()
    try:
        call_main(args)
    except ValueError:
        # We only sys.exit here, as call_main is used as part of a unit test
        # and should not exit the system
        sys.exit(1)
def _ParseDateTimeValue(self, parser_mediator, date_time_value):
"""Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
dfdatetime.TimeElements: date and time extracted from the value or None
if the value does not represent a valid string.
"""
if date_time_value[14] != 'Z':
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
try:
year = int(date_time_value[0:4], 10)
month = int(date_time_value[4:6], 10)
day_of_month = int(date_time_value[6:8], 10)
hours = int(date_time_value[8:10], 10)
minutes = int(date_time_value[10:12], 10)
seconds = int(date_time_value[12:14], 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
return dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None | Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
dfdatetime.TimeElements: date and time extracted from the value or None
if the value does not represent a valid string. |
def validate(self, columns=None):
"""
Validates the current record object to make sure it is ok to commit to the database. If
the optional override dictionary is passed in, then it will use the given values vs. the one
stored with this record object which can be useful to check to see if the record will be valid before
it is committed.
:param overrides | <dict>
:return <bool>
"""
schema = self.schema()
if not columns:
ignore_flags = orb.Column.Flags.Virtual | orb.Column.Flags.ReadOnly
columns = schema.columns(flags=~ignore_flags).values()
use_indexes = True
else:
use_indexes = False
# validate the column values
values = self.values(key='column', columns=columns)
for col, value in values.items():
if not col.validate(value):
return False
# valide the index values
if use_indexes:
for index in self.schema().indexes().values():
if not index.validate(self, values):
return False
return True | Validates the current record object to make sure it is ok to commit to the database. If
the optional override dictionary is passed in, then it will use the given values vs. the one
stored with this record object which can be useful to check to see if the record will be valid before
it is committed.
:param overrides | <dict>
:return <bool> |
def check_monophyly(self,
                    values,
                    target_attr,
                    ignore_missing=False,
                    unrooted=False):
    """
    Returns True if a given target attribute is monophyletic under
    this node for the provided set of values.

    If not all values are represented in the current tree
    structure, a ValueError exception will be raised to warn that
    strict monophyly could never be reached (this behaviour can be
    avoided by enabling the `ignore_missing` flag).

    Parameters:
    -----------
    values:
        a set of values for which monophyly is expected.
    target_attr:
        node attribute being used to check monophyly (i.e. species for
        species trees, names for gene family trees, or any custom feature
        present in the tree).
    ignore_missing:
        Avoid raising an Exception when missing attributes are found.
    unrooted:
        If True, tree will be treated as unrooted, thus allowing to find
        monophyly even when current outgroup is splitting a monophyletic group.

    Returns:
    --------
    the following tuple
        IsMonophyletic (boolean),
        clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
        leaves breaking the monophyly (set)
    """
    if type(values) != set:
        values = set(values)

    # This is the only time I traverse the tree, then I use cached
    # leaf content
    n2leaves = self.get_cached_content()

    # Raise an error if requested attribute values are not even present
    if ignore_missing:
        found_values = set([getattr(n, target_attr) for n in n2leaves[self]])
        # NOTE(review): missing_values is computed but never used afterwards.
        missing_values = values - found_values
        values = values & found_values

    # Locate leaves matching requested attribute values
    targets = set([leaf for leaf in n2leaves[self]
                   if getattr(leaf, target_attr) in values])
    if not ignore_missing:
        if values - set([getattr(leaf, target_attr) for leaf in targets]):
            raise ValueError('The monophyly of the provided values could never be reached, as not all of them exist in the tree.'
                             ' Please check your target attribute and values, or set the ignore_missing flag to True')

    if unrooted:
        # Unrooted monophyly: look for the smallest edge side that still
        # contains all target leaves.
        smallest = None
        for side1, side2 in self.iter_edges(cached_content=n2leaves):
            if targets.issubset(side1) and (not smallest or len(side1) < len(smallest)):
                smallest = side1
            elif targets.issubset(side2) and (not smallest or len(side2) < len(smallest)):
                smallest = side2
            if smallest is not None and len(smallest) == len(targets):
                # Exact match found; no smaller containing side can exist.
                break
        # NOTE(review): if no edge side contains all targets, `smallest`
        # stays None and the subtraction below raises TypeError -- TODO
        # confirm whether that case can actually occur here.
        foreign_leaves = smallest - targets
    else:
        # Check monophyly with get_common_ancestor. Note that this
        # step does not require traversing the tree again because
        # targets are node instances instead of node names, and
        # get_common_ancestor function is smart enough to detect it
        # and avoid unnecessary traversing.
        common = self.get_common_ancestor(targets)
        observed = n2leaves[common]
        foreign_leaves = set([leaf for leaf in observed
                              if getattr(leaf, target_attr) not in values])

    if not foreign_leaves:
        return True, "monophyletic", foreign_leaves
    else:
        # if the requested attribute is not monophyletic in this
        # node, let's differentiate between poly and paraphyly.
        poly_common = self.get_common_ancestor(foreign_leaves)
        # if the common ancestor of all foreign leaves is self
        # contained, we have a paraphyly. Otherwise, polyphyly.
        polyphyletic = [leaf for leaf in poly_common if
                        getattr(leaf, target_attr) in values]
        if polyphyletic:
            return False, "polyphyletic", foreign_leaves
        else:
            return False, "paraphyletic", foreign_leaves
this node for the provided set of values.
If not all values are represented in the current tree
structure, a ValueError exception will be raised to warn that
strict monophyly could never be reached (this behaviour can be
avoided by enabling the `ignore_missing` flag.
Parameters:
-----------
values:
a set of values for which monophyly is expected.
target_attr:
node attribute being used to check monophyly (i.e. species for
species trees, names for gene family trees, or any custom feature
present in the tree).
ignore_missing:
Avoid raising an Exception when missing attributes are found.
unrooted:
If True, tree will be treated as unrooted, thus allowing to find
monophyly even when current outgroup is spliting a monophyletic group.
Returns:
--------
the following tuple
IsMonophyletic (boolean),
clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
leaves breaking the monophyly (set) |
def _handle_version(self, data):
"""
Handles received version data.
:param data: Version string to parse
:type data: string
"""
_, version_string = data.split(':')
version_parts = version_string.split(',')
self.serial_number = version_parts[0]
self.version_number = version_parts[1]
self.version_flags = version_parts[2] | Handles received version data.
:param data: Version string to parse
:type data: string |
def _grp_store_group(self, traj_group, store_data=pypetconstants.STORE_DATA,
                     with_links=True, recursive=False, max_depth=None,
                     _hdf5_group=None, _newly_created=False):
    """Stores a group node.

    For group nodes only annotations and comments need to be stored.

    :param traj_group: trajectory group node to store
    :param store_data: one of the pypetconstants STORE_* constants;
        STORE_NOTHING skips everything, STORE_DATA_SKIPPING skips nodes
        already flagged as stored, OVERWRITE_DATA also overwrites existing
        comments, class names and annotations.
    :param with_links: forwarded to the recursive tree storage
    :param recursive: if True, storage is delegated to
        ``_tree_store_nodes_dfs`` starting from the node's parent
    :param max_depth: maximum recursion depth for recursive storage
    :param _hdf5_group: already-resolved HDF5 group (internal use)
    :param _newly_created: whether ``_hdf5_group`` was just created
        (internal use)
    """
    if store_data == pypetconstants.STORE_NOTHING:
        return
    elif store_data == pypetconstants.STORE_DATA_SKIPPING and traj_group._stored:
        self._logger.debug('Already found `%s` on disk I will not store it!' %
                           traj_group.v_full_name)
    elif not recursive:
        if _hdf5_group is None:
            _hdf5_group, _newly_created = self._all_create_or_get_groups(traj_group.v_full_name)

        overwrite = store_data == pypetconstants.OVERWRITE_DATA
        # Store the comment only if there is one and it is either absent
        # on disk or being explicitly overwritten.
        if (traj_group.v_comment != '' and
                (HDF5StorageService.COMMENT not in _hdf5_group._v_attrs or overwrite)):
            setattr(_hdf5_group._v_attrs, HDF5StorageService.COMMENT, traj_group.v_comment)

        if ((_newly_created or overwrite) and
                type(traj_group) not in (nn.NNGroupNode, nn.ConfigGroup, nn.ParameterGroup,
                                         nn.DerivedParameterGroup, nn.ResultGroup)):
            # We only store the name of the class if it is not one of the standard groups,
            # that are always used.
            setattr(_hdf5_group._v_attrs, HDF5StorageService.CLASS_NAME,
                    traj_group.f_get_class_name())

        self._ann_store_annotations(traj_group, _hdf5_group, overwrite=overwrite)
        self._hdf5file.flush()

        traj_group._stored = True

        # Signal completed node loading
        self._node_processing_timer.signal_update()

    if recursive:
        # Recursive storage is handled by the DFS tree walker, started
        # from the parent so that the node itself is included.
        parent_traj_group = traj_group.f_get_parent()
        parent_hdf5_group = self._all_create_or_get_groups(parent_traj_group.v_full_name)[0]

        self._tree_store_nodes_dfs(parent_traj_group, traj_group.v_name, store_data=store_data,
                                   with_links=with_links, recursive=recursive,
                                   max_depth=max_depth, current_depth=0,
                                   parent_hdf5_group=parent_hdf5_group)
For group nodes only annotations and comments need to be stored. |
def additions_version():
    '''
    Check VirtualBox Guest Additions version.

    CLI Example:

    .. code-block:: bash

        salt '*' vbox_guest.additions_version

    :return: version of VirtualBox Guest Additions or False if they are not installed
    '''
    try:
        additions_path = _additions_dir()
    except EnvironmentError:
        return False
    # Not installed (or directory empty) -> report False.
    if not (additions_path and os.listdir(additions_path)):
        return False
    # Directory is named "<prefix>-<version>"; strip the prefix part.
    return re.sub(r'^{0}-'.format(_additions_dir_prefix), '',
                  os.path.basename(additions_path))
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_version
:return: version of VirtualBox Guest Additions or False if they are not installed |
def addPath(rel_path, prepend=False):
    """ Adds a directory to the system python path, either by append (doesn't
    override default or globally installed package names) or by prepend
    (overrides default/global package names).
    """
    def _resolve(*paths):
        # Resolve relative to this module's directory; trailing '/' kept
        # to match the original behaviour.
        return os.path.abspath(
            os.path.join(os.path.dirname(__file__), *paths)) + '/'

    target = _resolve(rel_path)
    if prepend:
        return sys.path.insert(0, target)
    return sys.path.append(target)
override default or globally installed package names) or by prepend
(overrides default/global package names). |
def handle_namespace_pattern(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle statements like ``DEFINE NAMESPACE X AS PATTERN "Y"``.

    Compiles the pattern and registers it under the namespace name.

    :raises: RedefinedNamespaceError
    """
    name = tokens['name']
    # Reject duplicate definitions before registering anything.
    self.raise_for_redefined_namespace(line, position, name)
    self.namespace_to_pattern[name] = re.compile(tokens['value'])
    return tokens
:raises: RedefinedNamespaceError |
def linkify(text, shorten=False, extra_params="",
            require_protocol=False, permitted_protocols=["http", "https"]):
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.

    * ``extra_params``: Extra text to include in the link tag, or a callable
      taking the link as an argument and returning the extra text
      e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
      or::

          def extra_params_cb(url):
              if url.startswith("http://example.com"):
                  return 'class="internal"'
              else:
                  return 'class="external" rel="nofollow"'
          linkify(text, extra_params=extra_params_cb)

    * ``require_protocol``: Only linkify urls which include a protocol. If
      this is False, urls such as www.facebook.com will also be linkified.

    * ``permitted_protocols``: List (or set) of protocols which should be
      linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
      "mailto"])``. It is very unsafe to include protocols such as
      ``javascript``.

    .. note:: ``permitted_protocols`` uses a mutable default; it is only
       read here, never mutated, so this is safe.
    """
    if extra_params and not callable(extra_params):
        extra_params = " " + extra_params.strip()

    def make_link(m):
        # m matches one URL candidate from _URL_RE; group(1) is the whole
        # match, group(2) the protocol (may be empty).
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url  # not protocol, no linkify

        if proto and proto not in permitted_protocols:
            return url  # bad protocol, no linkify

        href = m.group(1)
        if not proto:
            href = "http://" + href   # no proto specified, use http

        if callable(extra_params):
            params = " " + extra_params(href).strip()
        else:
            params = extra_params

        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "")  # +1 for :
            else:
                proto_len = 0

            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = url[:proto_len] + parts[0] + "/" + \
                    parts[1][:8].split('?')[0].split('.')[0]

            if len(url) > max_len * 1.5:  # still too long
                url = url[:max_len]

            if url != before_clip:
                amp = url.rfind('&')
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."

                if len(url) >= len(before_clip):
                    url = before_clip
                else:
                    # full url is visible on mouse-over (for those who don't
                    # have a status bar, such as Safari by default)
                    params += ' title="%s"' % href

        return u('<a href="%s"%s>%s</a>') % (href, params, url)

    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entites other than &amp; so
    # that we won't pick up &quot;, etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``. |
def bayesian_hmm(observations, estimated_hmm, nsample=100, reversible=True, stationary=False,
                 p0_prior='mixed', transition_matrix_prior='mixed', store_hidden=False, call_back=None):
    r""" Bayesian HMM based on sampling the posterior.

    Draws ``nsample`` HMMs from the posterior distribution via Gibbs
    sampling, starting from a maximum-likelihood estimate.

    Parameters
    ----------
    observations : list of 1d numpy arrays
        ``observations[i]`` is the observed trajectory with index ``i``.
    estimated_hmm : HMM
        HMM estimated from estimate_hmm or initialize_hmm; serves as the
        sampler's initial model and as reference for the returned object.
    nsample : int, optional, default=100
        number of Gibbs sampling steps.
    reversible : bool, optional, default=True
        if True, a prior that enforces reversible transition matrices
        (detailed balance) is used; otherwise a standard non-reversible
        prior is used.
    stationary : bool, optional, default=False
        if True, the stationary distribution of the transition matrix is
        used as initial distribution; only use True if the observation
        trajectories are started from global equilibrium. If False, the
        initial distribution is estimated from the first step of the
        hidden trajectories.
    p0_prior : None, str, float or ndarray(n)
        Dirichlet prior for the initial distribution (active only when
        ``stationary=False``): 'mixed' (default; prior counts taken from
        the initial model's initial distribution), 'uniform' (all prior
        counts 1), an explicit array/float of prior counts, or None for
        zero prior counts. None makes the sample mean coincide with the
        MLE but can lead to sampling problems outside the large-data
        regime.
    transition_matrix_prior : str or ndarray(n, n)
        prior for the HMM transition matrix: 'mixed' (default; one prior
        count per row taken from the initial model's transition matrix),
        'uniform' (all prior counts 1), an explicit count matrix, or None
        for zero prior counts (same caveats as for ``p0_prior``). See [1]_
        for the reversible transition matrix priors.
    store_hidden : bool, optional, default=False
        store hidden trajectories in sampled HMMs.
    call_back : callable, optional, default=None
        argument-less function called after each computed sample; useful
        for implementing progress bars.

    Return
    ------
    hmm : :class:`SampledHMM <bhmm.hmm.generic_sampled_hmm.SampledHMM>`

    References
    ----------
    .. [1] Trendelkamp-Schroer, B., H. Wu, F. Paul and F. Noe:
       Estimation and uncertainty of reversible Markov models.
       J. Chem. Phys. 143, 174101 (2015).
    """
    from bhmm.estimators.bayesian_sampling import BayesianHMMSampler as _BHMM
    from bhmm.hmm.generic_sampled_hmm import SampledHMM

    # Set up the Gibbs sampler around the maximum-likelihood estimate.
    sampler = _BHMM(observations, estimated_hmm.nstates, initial_model=estimated_hmm,
                    reversible=reversible, stationary=stationary, transition_matrix_sampling_steps=1000,
                    p0_prior=p0_prior, transition_matrix_prior=transition_matrix_prior,
                    output=estimated_hmm.output_model.model_type)
    # Draw the requested number of posterior samples.
    samples = sampler.sample(nsamples=nsample, save_hidden_state_trajectory=store_hidden,
                             call_back=call_back)
    return SampledHMM(estimated_hmm, samples)
Generic maximum-likelihood estimation of HMMs
Parameters
----------
observations : list of numpy arrays representing temporal data
`observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i`
estimated_hmm : HMM
HMM estimated from estimate_hmm or initialize_hmm
reversible : bool, optional, default=True
If True, a prior that enforces reversible transition matrices (detailed balance) is used;
otherwise, a standard non-reversible prior is used.
stationary : bool, optional, default=False
If True, the stationary distribution of the transition matrix will be used as initial distribution.
Only use True if you are confident that the observation trajectories are started from a global
equilibrium. If False, the initial distribution will be estimated as usual from the first step
of the hidden trajectories.
nsample : int, optional, default=100
number of Gibbs sampling steps
p0_prior : None, str, float or ndarray(n)
Prior for the initial distribution of the HMM. Will only be active
if stationary=False (stationary=True means that p0 is identical to
the stationary distribution of the transition matrix).
Currently implements different versions of the Dirichlet prior that
is conjugate to the Dirichlet distribution of p0. p0 is sampled from:
.. math:
p0 \sim \prod_i (p0)_i^{a_i + n_i - 1}
where :math:`n_i` are the number of times a hidden trajectory was in
state :math:`i` at time step 0 and :math:`a_i` is the prior count.
Following options are available:
| 'mixed' (default), :math:`a_i = p_{0,init}`, where :math:`p_{0,init}`
is the initial distribution of initial_model.
| 'uniform', :math:`a_i = 1`
| ndarray(n) or float,
the given array will be used as A.
| None, :math:`a_i = 0`. This option ensures coincidence between
sample mean an MLE. Will sooner or later lead to sampling problems,
because as soon as zero trajectories are drawn from a given state,
the sampler cannot recover and that state will never serve as a starting
state subsequently. Only recommended in the large data regime and
when the probability to sample zero trajectories from any state
is negligible.
transition_matrix_prior : str or ndarray(n, n)
Prior for the HMM transition matrix.
Currently implements Dirichlet priors if reversible=False and reversible
transition matrix priors as described in [1]_ if reversible=True. For the
nonreversible case the posterior of transition matrix :math:`P` is:
.. math:
P \sim \prod_{i,j} p_{ij}^{b_{ij} + c_{ij} - 1}
where :math:`c_{ij}` are the number of transitions found for hidden
trajectories and :math:`b_{ij}` are prior counts.
| 'mixed' (default), :math:`b_{ij} = p_{ij,init}`, where :math:`p_{ij,init}`
is the transition matrix of initial_model. That means one prior
count will be used per row.
| 'uniform', :math:`b_{ij} = 1`
| ndarray(n, n) or broadcastable,
the given array will be used as B.
| None, :math:`b_ij = 0`. This option ensures coincidence between
sample mean an MLE. Will sooner or later lead to sampling problems,
because as soon as a transition :math:`ij` will not occur in a
sample, the sampler cannot recover and that transition will never
be sampled again. This option is not recommended unless you have
a small HMM and a lot of data.
store_hidden : bool, optional, default=False
store hidden trajectories in sampled HMMs
call_back : function, optional, default=None
a call back function with no arguments, which if given is being called
after each computed sample. This is useful for implementing progress bars.
Return
------
hmm : :class:`SampledHMM <bhmm.hmm.generic_sampled_hmm.SampledHMM>`
References
----------
.. [1] Trendelkamp-Schroer, B., H. Wu, F. Paul and F. Noe:
Estimation and uncertainty of reversible Markov models.
J. Chem. Phys. 143, 174101 (2015). |
def textmetrics(self, txt, width=None, height=None, **kwargs):
    '''Returns the width and height of a string of text as a tuple
    (according to current font settings).
    '''
    # Rendering is disabled (enableRendering=False): only the path
    # generation runs, which is enough to obtain accurate metrics.
    # Not the most efficient approach, but matches Nodebox behaviour.
    laid_out = self.Text(txt, 0, 0, width, height, enableRendering=False, **kwargs)
    return laid_out.metrics
(according to current font settings). |
def cublasZtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Matrix-vector product for complex triangular-packed matrix.

    Thin wrapper around ``cublasZtpmv_v2`` (x = op(A)*x for an n x n
    triangular matrix A in packed storage; per the cuBLAS documentation,
    x is updated in place on the device).

    :param handle: cuBLAS context handle
    :param uplo: key into _CUBLAS_FILL_MODE selecting which triangle of A
        is stored
    :param trans: key into _CUBLAS_OP selecting op(A)
    :param diag: key into _CUBLAS_DIAG (unit vs. non-unit diagonal)
    :param n: order of the matrix A
    :param AP: device pointer to the packed matrix (cast to int)
    :param x: device pointer to the vector (cast to int)
    :param incx: stride between consecutive elements of x
    :raises: whatever cublasCheckStatus raises on a non-success status
    """
    status = _libcublas.cublasZtpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
def create_from_other( Class, other, values=None ):
"""
Create a new Matrix with attributes taken from `other` but with the
values taken from `values` if provided
"""
m = Class()
m.alphabet = other.alphabet
m.sorted_alphabet = other.sorted_alphabet
m.char_to_index = other.char_to_index
if values is not None:
m.values = values
else:
m.values = other.values
return m | Create a new Matrix with attributes taken from `other` but with the
values taken from `values` if provided |
def full_signature(self):
    """
    The full signature of a ``"function"`` node.

    **Return**
        :class:`python:str`
            The full signature of the function, including template, return
            type, name, and parameter types.

    **Raises**
        :class:`python:RuntimeError`
            If ``self.kind != "function"``.
    """
    # Guard clause: only function nodes have a signature.
    if self.kind != "function":
        raise RuntimeError(
            "full_signature may only be called for a 'function', but {name} is a '{kind}' node.".format(
                name=self.name, kind=self.kind
            )
        )
    if self.template:
        prefix = "template <{0}> ".format(", ".join(self.template))
    else:
        prefix = ""
    return "{0}{1} {2}({3})".format(
        prefix, self.return_type, self.name, ", ".join(self.parameters)
    )
**Return**
:class:`python:str`
The full signature of the function, including template, return type,
name, and parameter types.
**Raises**
:class:`python:RuntimeError`
If ``self.kind != "function"``. |
def index(self, alias):
    """
    Selects which database this QuerySet should execute its query against.
    """
    # Work on a clone so the original QuerySet stays untouched.
    duplicate = self._clone()
    duplicate._index = alias
    return duplicate
def _maybe_clear_confirmation_futures(self):
"""Invoked when the message has finished processing, ensuring there
are no confirmation futures pending.
"""
for name in self._connections.keys():
self._connections[name].clear_confirmation_futures() | Invoked when the message has finished processing, ensuring there
are no confirmation futures pending. |
def _add32(ins):
    """ Pops last 2 dwords from the stack and adds them.
    Then push the result onto the stack.

    Optimizations:
      * If any of the operands is ZERO,
        then do NOTHING: A + 0 = 0 + A = A

    :param ins: quad instruction; ins.quad[2:] holds the two operands
    :return: list of Z80 assembly lines implementing the 32-bit addition
    """
    op1, op2 = tuple(ins.quad[2:])

    if _int_ops(op1, op2) is not None:
        o1, o2 = _int_ops(op1, op2)

        if int(o2) == 0:  # A + 0 = 0 + A = A => Do Nothing
            output = _32bit_oper(o1)
            output.append('push de')
            output.append('push hl')
            return output

    # Put the memory operand (label, '_'-prefixed) into op2 so the
    # memory-direct variant below can be used.
    if op1[0] == '_' and op2[0] != '_':
        op1, op2 = op2, op1  # swap them

    if op2[0] == '_':
        # Memory operand: add the 32-bit value stored at label op2
        # (low word first, then high word with carry via 'adc').
        output = _32bit_oper(op1)
        output.append('ld bc, (%s)' % op2)
        output.append('add hl, bc')
        output.append('ex de, hl')
        output.append('ld bc, (%s + 2)' % op2)
        output.append('adc hl, bc')
        output.append('push hl')
        output.append('push de')
        return output

    # General case: both operands on the stack; add low word, then
    # high word with carry.
    output = _32bit_oper(op1, op2)
    output.append('pop bc')
    output.append('add hl, bc')
    output.append('ex de, hl')
    output.append('pop bc')
    output.append('adc hl, bc')
    output.append('push hl')  # High and low parts are reversed
    output.append('push de')
    return output
Then push the result onto the stack.
Optimizations:
* If any of the operands is ZERO,
then do NOTHING: A + 0 = 0 + A = A |
def do_copy(self,args):
    """Copy specified id to stack. copy -h for detailed help.

    ASG arguments may be given either as an integer index into the
    stack's resource list or as a logical resource name; outputs are
    resolved via self.getOutputs(). All resolved values are joined with
    newlines and placed on the clipboard via pyperclip.
    """
    parser = CommandArgumentParser("copy")
    parser.add_argument('-a','--asg',dest='asg',nargs='+',required=False,default=[],help='Copy specified ASG info.')
    parser.add_argument('-o','--output',dest='output',nargs='+',required=False,default=[],help='Copy specified output info.')
    args = vars(parser.parse_args(args))

    values = []
    if args['output']:
        values.extend(self.getOutputs(args['output']))
    if args['asg']:
        for asg in args['asg']:
            try:
                index = int(asg)
                asgSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::AutoScaling::AutoScalingGroup'][index]
            except (ValueError, LookupError):
                # Not an integer index (ValueError) or index/key missing
                # (LookupError): fall back to lookup by logical name.
                # Narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit.
                asgSummary = self.wrappedStack['resourcesByTypeName']['AWS::AutoScaling::AutoScalingGroup'][asg]
            values.append(asgSummary.physical_resource_id)
    print("values:{}".format(values))
    pyperclip.copy("\n".join(values))
def add_text_img(img, text, pos, box=None, color=None, thickness=1, scale=1, vertical=False):
    """
    Adds the given text in the image (drawn in place with cv.putText).

    :param img: Input image (numpy array; modified in place)
    :param text: String text (non-strings are converted with str())
    :param pos: (x, y) in the image or relative to the given Box object
    :param box: Box object. If not None, the text is placed inside the box.
    :param color: Color of the text; defaults to COL_WHITE.
    :param thickness: Thickness of the font.
    :param scale: Font size scale.
    :param vertical: If true, the text is displayed vertically. (slow)
    :return: None
    """
    if color is None:
        color = COL_WHITE
    text = str(text)
    top_left = pos
    if box is not None:
        top_left = box.move(pos).to_int().top_left()
    # Anchor lies right of the image -> nothing visible to draw.
    if top_left[0] > img.shape[1]:
        return
    if vertical:
        if box is not None:
            # assumes 3-channel target when a box is given -- TODO confirm
            h, w, d = box.height, box.width, 3
        else:
            h, w, d = img.shape
        # Render onto a scratch canvas with swapped axes, rotate it by
        # 90 degrees, then composite only the text pixels back.
        txt_img = np.zeros((w, h, d), dtype=np.uint8)
        # 90 deg rotation
        top_left = h - pos[1], pos[0]
        cv.putText(txt_img, text, top_left, cv.FONT_HERSHEY_PLAIN, scale, color, thickness)
        txt_img = ndimage.rotate(txt_img, 90)
        mask = txt_img > 0
        if box is not None:
            im_box = img_box(img, box)
            im_box[mask] = txt_img[mask]
        else:
            img[mask] = txt_img[mask]
    else:
        cv.putText(img, text, top_left, cv.FONT_HERSHEY_PLAIN, scale, color, thickness)
:param img: Input image
:param text: String text
:param pos: (x, y) in the image or relative to the given Box object
:param box: Box object. If not None, the text is placed inside the box.
:param color: Color of the text.
:param thickness: Thickness of the font.
:param scale: Font size scale.
:param vertical: If true, the text is displayed vertically. (slow)
:return: |
def intersection(self, another_moc, delta_t=DEFAULT_OBSERVATION_TIME):
    """
    Intersection between self and moc. ``delta_t`` gives the possibility to
    the user to set a time resolution for performing the tmoc intersection.

    Parameters
    ----------
    another_moc : `~mocpy.abstract_moc.AbstractMOC`
        the MOC/TimeMOC used for performing the intersection with self
    delta_t : `~astropy.time.TimeDelta`, optional
        the duration of one observation (default 30 min); used to choose
        the most efficient TimeMoc order that can still discriminate two
        observations separated by ``delta_t``.

    Returns
    -------
    result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
        MOC object whose interval set corresponds to : self & ``moc``
    """
    # Degrade both MOCs to the common order implied by delta_t before
    # delegating to the generic intersection.
    common_order = TimeMOC.time_resolution_to_order(delta_t)
    degraded_self, degraded_other = self._process_degradation(another_moc, common_order)
    return super(TimeMOC, degraded_self).intersection(degraded_other)
to set a time resolution for performing the tmoc intersection
Parameters
----------
another_moc : `~mocpy.abstract_moc.AbstractMOC`
the MOC/TimeMOC used for performing the intersection with self
delta_t : `~astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMoc order to represent the observations. (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``)
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
MOC object whose interval set corresponds to : self & ``moc`` |
def print_email(message, app):
    """Print mail to stream.

    Signal handler for email_dispatched signal. Prints by default the output
    to the stream specified in the constructor of InvenioMail.

    :param message: Message object.
    :param app: Flask application object.
    """
    ext = app.extensions['invenio-mail']
    # Serialize writers so interleaved messages stay readable.
    with ext._lock:
        ext.stream.write(
            '{0}\n{1}\n'.format(message.as_string(), '-' * 79))
        ext.stream.flush()
Signal handler for email_dispatched signal. Prints by default the output
to the stream specified in the constructor of InvenioMail.
:param message: Message object.
:param app: Flask application object. |
def new_geom(geom_type, size, pos=(0, 0, 0), rgba=RED, group=0, **kwargs):
    """
    Creates a geom element with attributes specified by @**kwargs.

    Args:
        geom_type (str): type of the geom.
            see all types here: http://mujoco.org/book/modeling.html#geom
        size: geom size parameters.
        pos: 3d position of the geom frame.
        rgba: color and transparency. Defaults to solid red.
        group: the integer group that the geom belongs to. useful for
            separating visual and physical elements.

    Returns:
        the constructed ``<geom>`` XML element.
    """
    # Collect all attributes (caller-supplied plus the named ones) into a
    # single string-valued dict for the XML element.
    attributes = dict(kwargs)
    attributes["type"] = str(geom_type)
    attributes["size"] = array_to_string(size)
    attributes["rgba"] = array_to_string(rgba)
    attributes["group"] = str(group)
    attributes["pos"] = array_to_string(pos)
    return ET.Element("geom", attrib=attributes)
return element | Creates a geom element with attributes specified by @**kwargs.
Args:
geom_type (str): type of the geom.
see all types here: http://mujoco.org/book/modeling.html#geom
size: geom size parameters.
pos: 3d position of the geom frame.
rgba: color and transparency. Defaults to solid red.
group: the integrer group that the geom belongs to. useful for
separating visual and physical elements. |
def read_cell(self, x, y):
    """
    reads the cell at position x and y; puts the default styles in xlwt

    :param x: row index in the sheet
    :param y: column index in the sheet
    :return: ``{header: value}`` or, when ``self.style`` is truthy,
        ``{header: (value, xlwt_style)}``
    """
    cell = self._sheet.row(x)[y]
    # Colour index 64 is xlrd's automatic/default background; remap to 9
    # -- presumably the intended default for xlwt output, TODO confirm.
    # NOTE(review): this mutates the shared xf_list entry, so it affects
    # later reads of any cell using the same format record.
    if self._file.xf_list[
            cell.xf_index].background.pattern_colour_index == 64:
        self._file.xf_list[
            cell.xf_index].background.pattern_colour_index = 9
    # One cached xlwt style per background colour index.
    if self._file.xf_list[
            cell.xf_index].background.pattern_colour_index in self.colors.keys():
        style = self.colors[self._file.xf_list[
            cell.xf_index].background.pattern_colour_index]
    else:
        style = self.xlwt.easyxf(
            'pattern: pattern solid; border: top thin, right thin, bottom thin, left thin;')
        style.pattern.pattern_fore_colour = self._file.xf_list[
            cell.xf_index].background.pattern_colour_index
        self.colors[self._file.xf_list[
            cell.xf_index].background.pattern_colour_index] = style
    # Font name/bold are copied onto the cached style object; the cache is
    # shared, so the last-read cell's font wins for that colour.
    style.font.name = self._file.font_list[
        self._file.xf_list[cell.xf_index].font_index].name
    style.font.bold = self._file.font_list[
        self._file.xf_list[cell.xf_index].font_index].bold
    # Header entries may be (name, ...) tuples; use the first element.
    if isinstance(self.header[y], tuple):
        header = self.header[y][0]
    else:
        header = self.header[y]
    if self.strip:
        if is_str_or_unicode(cell.value):
            cell.value = cell.value.strip()
    if self.style:
        return {header: (cell.value, style)}
    else:
        return {header: cell.value}
def bna_config_cmd_status_input_session_id(self, **kwargs):
        """Auto Generated Code.

        Builds the ``bna_config_cmd_status/input/session-id`` request
        element. Requires ``session_id`` in ``kwargs``; an optional
        ``callback`` (default ``self._callback``) receives the element.
        """
        config = ET.Element("config")
        bna_config_cmd_status = ET.Element("bna_config_cmd_status")
        # NOTE(review): the initial <config> element is discarded here -- the
        # payload root sent to the callback is <bna_config_cmd_status>.
        config = bna_config_cmd_status
        # NOTE: "input" shadows the builtin of the same name (generated code).
        input = ET.SubElement(bna_config_cmd_status, "input")
        session_id = ET.SubElement(input, "session-id")
        session_id.text = kwargs.pop('session_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code
def _get_template(self, event, ctype, fields):
        """Gets the contents of the template for the specified event and type
        with all the fields replaced.
        :arg event: one of ['start', 'error', 'success', 'timeout', 'failure'].
        :arg ctype: one of ["txt", "html"] specifying which template to use.
        :arg fields: a dictionary of fields and their replacement values to
          insert.
        :returns: the template text with every field occurrence replaced.
        :raises ValueError: if the template file for (event, ctype) does not
          exist under ``<server.dirname>/templates``.
        """
        from os import path
        # Template files live at <server.dirname>/templates/<event>.<ctype>.
        template = path.join(self.server.dirname, "templates", "{}.{}".format(event, ctype))
        contents = None
        if path.isfile(template):
            with open(template) as f:
                #The templates are very small, so we don't need to worry about the file size.
                contents = f.read()
                # Plain substring replacement: each key in `fields` is
                # replaced everywhere it occurs in the template text.
                for field, value in fields.items():
                    contents = contents.replace(field, value)
        else:
            raise ValueError("The event '{}' is not supported or ".format(event) +
                             "the template file ({}) is missing.".format(template))
        return contents | Gets the contents of the template for the specified event and type
with all the fields replaced.
:arg event: one of ['start', 'error', 'success', 'timeout', 'failure'].
:arg ctype: one of ["txt", "html"] specifying which template to use.
:arg fields: a dictionary of fields and their replacement values to
insert. |
def _build_gui(self):
        """
        Removes all existing sliders and rebuilds them based on the colormap.

        Tears down the central widget (which destroys all child controls),
        recreates the colormap combobox plus Save/Delete buttons, then adds
        one row of controls per entry in ``self._colorpoint_list``: a
        bottom-color button, a link checkbox, a top-color button, a position
        slider, and +/- buttons. Each colorpoint entry is assumed to be
        ``(position, bottom_rgb, top_rgb)`` with rgb components in [0, 1]
        (inferred from usage below -- TODO confirm).
        """
        # remove all widgets (should destroy all children too)
        self._central_widget.deleteLater()
        # remove all references to other controls
        self._sliders = []
        self._buttons_top_color = []
        self._buttons_bottom_color = []
        self._checkboxes = []
        self._buttons_plus = []
        self._buttons_minus = []
        self._color_dialogs_top = []
        self._color_dialogs_bottom = []
        # create the new central widget
        self._central_widget = _qtw.QWidget()
        self._window.setCentralWidget(self._central_widget)
        # layout for main widget
        self._layout = _qtw.QGridLayout(self._central_widget)
        self._central_widget.setLayout(self._layout)
        # add the list of cmaps
        self._combobox_cmaps = _qtw.QComboBox(self._central_widget)
        self._combobox_cmaps.setEditable(True)
        self._load_cmap_list()
        # add the save and delete buttons
        self._button_save = _qtw.QPushButton("Save", self._central_widget)
        self._button_delete = _qtw.QPushButton("Delete", self._central_widget)
        self._button_save.setFixedWidth(70)
        self._button_delete.setFixedWidth(70)
        # layouts
        self._layout.addWidget(self._combobox_cmaps, 1,1, 1,3, _qtcore.Qt.Alignment(0))
        self._layout.addWidget(self._button_save, 1,5, 1,1, _qtcore.Qt.Alignment(1))
        self._layout.addWidget(self._button_delete, 1,6, 1,2, _qtcore.Qt.Alignment(1))
        # actions
        self._combobox_cmaps.currentIndexChanged.connect(self._signal_load)
        self._button_save .clicked.connect(self._button_save_clicked)
        self._button_delete.clicked.connect(self._button_delete_clicked)
        # ensmallen the window
        self._window.resize(10,10)
        # now create a control set for each color point
        for n in range(len(self._colorpoint_list)):
            c1 = self._colorpoint_list[n][1]
            c2 = self._colorpoint_list[n][2]
            # create a top-color button
            self._buttons_top_color.append(_qtw.QPushButton(self._central_widget))
            self._buttons_top_color[-1].setStyleSheet("background-color: rgb("+str(int(c2[0]*255))+","+str(int(c2[1]*255))+","+str(int(c2[2]*255))+"); border-radius: 3px;")
            # create a bottom-color button
            self._buttons_bottom_color.append(_qtw.QPushButton(self._central_widget))
            self._buttons_bottom_color[-1].setStyleSheet("background-color: rgb("+str(int(c1[0]*255))+","+str(int(c1[1]*255))+","+str(int(c1[2]*255))+"); border-radius: 3px;")
            # create color dialogs
            self._color_dialogs_top.append(_qtw.QColorDialog(self._central_widget))
            self._color_dialogs_top[-1].setCurrentColor(self._buttons_top_color[-1].palette().color(1))
            self._color_dialogs_bottom.append(_qtw.QColorDialog(self._central_widget))
            # NOTE(review): the bottom dialog is seeded from the TOP button's
            # palette; likely a copy-paste slip -- expected
            # self._buttons_bottom_color[-1] here. Confirm before changing.
            self._color_dialogs_bottom[-1].setCurrentColor(self._buttons_top_color[-1].palette().color(1))
            # create link checkboxes
            self._checkboxes.append(_qtw.QCheckBox(self._central_widget))
            self._checkboxes[-1].setChecked(c1==c2)
            # create a slider
            self._sliders.append(_qtw.QSlider(self._central_widget))
            self._sliders[-1].setOrientation(_qtcore.Qt.Horizontal)
            self._sliders[-1].setMaximum(1000)
            self._sliders[-1].setValue(int(self._colorpoint_list[n][0]*1000))
            self._sliders[-1].setFixedWidth(250)
            # create + and - buttons
            self._buttons_plus.append(_qtw.QPushButton(self._central_widget))
            self._buttons_plus[-1].setText("+")
            self._buttons_plus[-1].setFixedWidth(25)
            self._buttons_minus.append(_qtw.QPushButton(self._central_widget))
            self._buttons_minus[-1].setText("-")
            self._buttons_minus[-1].setFixedWidth(25)
            # layout
            self._layout.addWidget(self._buttons_bottom_color[-1], n+3,1, _qtcore.Qt.AlignCenter)
            self._layout.addWidget(self._checkboxes[-1], n+3,2, 1,1, _qtcore.Qt.AlignCenter)
            self._layout.addWidget(self._buttons_top_color[-1], n+3,3, _qtcore.Qt.AlignCenter)
            self._layout.addWidget(self._sliders[-1], n+3,4, 1,2, _qtcore.Qt.AlignCenter)
            self._layout.setColumnStretch(5,100)
            self._layout.addWidget(self._buttons_minus[-1], n+3,7, _qtcore.Qt.AlignCenter)
            self._layout.addWidget(self._buttons_plus[-1], n+3,6, _qtcore.Qt.AlignCenter)
            # connect the buttons and slider actions to the calls
            self._buttons_bottom_color[-1] .clicked.connect(_partial(self._color_button_clicked, n, 0))
            self._buttons_top_color[-1] .clicked.connect(_partial(self._color_button_clicked, n, 1))
            self._color_dialogs_bottom[-1].currentColorChanged.connect(_partial(self._color_dialog_changed, n, 0))
            self._color_dialogs_top[-1] .currentColorChanged.connect(_partial(self._color_dialog_changed, n, 1))
            self._buttons_plus[-1] .clicked.connect(_partial(self._button_plus_clicked, n))
            self._buttons_minus[-1] .clicked.connect(_partial(self._button_minus_clicked, n))
            self._sliders[-1] .valueChanged.connect(_partial(self._slider_changed, n))
        # disable the appropriate sliders
        # The first and last colorpoints are pinned at the colormap ends,
        # so their position sliders must not move.
        self._sliders[0] .setDisabled(True)
        self._sliders[-1].setDisabled(True) | Removes all existing sliders and rebuilds them based on the colormap.
def plot_forward_models(self, maglim=None, phalim=None, **kwargs):
        """Create plots of the forward models.

        One figure is produced for resistivity magnitude ('rmag') and one
        for phase ('rpha'), each with a subplot per frequency (up to 4
        columns), sharing x/y axes.

        Parameters
        ----------
        maglim: tuple or None
            (min, max) colorbar limits for the magnitude plots; None for
            auto-scaling.
        phalim: tuple or None
            (min, max) colorbar limits for the phase plots; None for
            auto-scaling.
        kwargs: dict
            Forwarded to ``plot_elements_to_ax``.

        Returns
        -------
        mag_fig: dict
            Dictionary containing the figure and axes objects of the magnitude
            plots, keyed by 'rmag' and 'rpha'.
        """
        return_dict = {}
        N = len(self.frequencies)
        # Grid layout: at most 4 columns, as many rows as needed.
        nrx = min(N, 4)
        nrz = int(np.ceil(N / nrx))
        # index 0 -> magnitude, index 1 -> phase of the forward model pair.
        for index, key, limits in zip(
                (0, 1), ('rmag', 'rpha'), (maglim, phalim)):
            if limits is None:
                cbmin = None
                cbmax = None
            else:
                cbmin = limits[0]
                cbmax = limits[1]
            fig, axes = plt.subplots(
                nrz, nrx,
                figsize=(16 / 2.54, nrz * 3 / 2.54),
                sharex=True, sharey=True,
            )
            # Hide everything first; unused trailing subplots stay invisible.
            for ax in axes.flat:
                ax.set_visible(False)
            for ax, frequency in zip(axes.flat, self.frequencies):
                ax.set_visible(True)
                td = self.tds[frequency]
                pids = td.a['forward_model']
                td.plot.plot_elements_to_ax(
                    pids[index],
                    ax=ax,
                    plot_colorbar=True,
                    cbposition='horizontal',
                    cbmin=cbmin,
                    cbmax=cbmax,
                    **kwargs
                )
            # Only the bottom row keeps x labels, only the first column
            # keeps y labels (axes are shared).
            for ax in axes[0:-1, :].flat:
                ax.set_xlabel('')
            for ax in axes[:, 1:].flat:
                ax.set_ylabel('')
            fig.tight_layout()
            return_dict[key] = {
                'fig': fig,
                'axes': axes,
            }
        return return_dict | Create plots of the forward models
Returns
-------
mag_fig: dict
Dictionary containing the figure and axes objects of the magnitude
plots |
def _arrays_to_sections(self, arrays):
        '''
        input: unprocessed numpy arrays.
        returns: columns of the size that they will appear in the image, not scaled
          for display. That needs to wait until after variance is computed.

        Each array is reshaped to a 2-D "section" according to its rank,
        appended to the rolling history (``self.sections_over_time``), and
        optionally converted to variance sections when mode == 'variance'.
        '''
        sections = []
        sections_to_resize_later = {}
        show_all = self.config['show_all']
        image_width = self._determine_image_width(arrays, show_all)
        for array_number, array in enumerate(arrays):
            rank = len(array.shape)
            section_height = self._determine_section_height(array, show_all)
            # Dispatch on rank: 1-D -> single row, 2-D -> as-is,
            # 4-D -> conv-kernel layout, anything else -> irregular reshape.
            if rank == 1:
                section = np.atleast_2d(array)
            elif rank == 2:
                section = array
            elif rank == 4:
                section = self._reshape_conv_array(array, section_height, image_width)
            else:
                section = self._reshape_irregular_array(array,
                                                        section_height,
                                                        image_width)
            # Only calculate variance for what we have to. In some cases (biases),
            # the section is larger than the array, so we don't want to calculate
            # variance for the same value over and over - better to resize later.
            # About a 6-7x speedup for a big network with a big variance window.
            section_size = section_height * image_width
            array_size = np.prod(array.shape)
            if section_size > array_size:
                sections.append(section)
                sections_to_resize_later[array_number] = section_height
            else:
                sections.append(im_util.resize(section, section_height, image_width))
        self.sections_over_time.append(sections)
        if self.config['mode'] == 'variance':
            sections = self._sections_to_variance_sections(self.sections_over_time)
        # Deferred resizes happen after (possible) variance computation.
        for array_number, height in sections_to_resize_later.items():
            sections[array_number] = im_util.resize(sections[array_number],
                                                    height,
                                                    image_width)
        return sections | input: unprocessed numpy arrays.
returns: columns of the size that they will appear in the image, not scaled
for display. That needs to wait until after variance is computed. |
def AddClientKeywords(self, client_id, keywords):
    """Associates the provided keywords with the client.

    Args:
      client_id: id of a client already present in self.metadatas.
      keywords: iterable of keyword strings to index this client under.

    Raises:
      db.UnknownClientError: if the client has no metadata entry.
    """
    if client_id not in self.metadatas:
      raise db.UnknownClientError(client_id)
    for kw in keywords:
      # Record (or refresh) the association timestamp for this keyword.
      self.keywords.setdefault(kw, {})
      self.keywords[kw][client_id] = rdfvalue.RDFDatetime.Now() | Associates the provided keywords with the client.
def data_slice(self, slice_ind):
        """Return the datapoints selected by ``slice_ind``.

        When ``self.height`` is None the data is indexed directly;
        otherwise only the first axis is sliced and the remaining axes
        are kept intact (assumes numpy-style indexing -- TODO confirm).
        """
        if self.height is None:
            return self.data[slice_ind]
        return self.data[slice_ind, ...] | Returns a slice of datapoints
def pfopen(self, event=None):
        """ Load the parameter settings from a user-specified file.

        Prompts for a file when the menu choice ends in '...', validates
        that the file belongs to the current task, pushes its values into
        the GUI, fires triggers, and records the loaded state as the last
        saved state. Returns early (None) on cancel or on any load error.
        """
        # Get the selected file name
        fname = self._openMenuChoice.get()
        # Also allow them to simply find any file - do not check _task_name_...
        # (could use tkinter's FileDialog, but this one is prettier)
        if fname[-3:] == '...':
            if capable.OF_TKFD_IN_EPAR:
                fname = askopenfilename(title="Load Config File",
                                        parent=self.top)
            else:
                from . import filedlg
                fd = filedlg.PersistLoadFileDialog(self.top,
                                                   "Load Config File",
                                                   self._getSaveAsFilter())
                if fd.Show() != 1:
                    fd.DialogCleanup()
                    return
                fname = fd.GetFileName()
                fd.DialogCleanup()
            if not fname: return # canceled
        self.debug('Loading from: '+fname)
        # load it into a tmp object (use associatedPkg if we have one)
        try:
            tmpObj = cfgpars.ConfigObjPars(fname, associatedPkg=\
                                           self._taskParsObj.getAssocPkg(),
                                           strict=self._strict)
        except Exception as ex:
            # NOTE(review): ex.message is Python-2-only; str(ex) would be
            # portable -- confirm the supported Python versions.
            showerror(message=ex.message, title='Error in '+os.path.basename(fname))
            self.debug('Error in '+os.path.basename(fname))
            self.debug(traceback.format_exc())
            return
        # check it to make sure it is a match
        if not self._taskParsObj.isSameTaskAs(tmpObj):
            msg = 'The current task is "'+self._taskParsObj.getName()+ \
                  '", but the selected file is for task "'+ \
                  str(tmpObj.getName())+'". This file was not loaded.'
            showerror(message=msg, title="Error in "+os.path.basename(fname))
            self.debug(msg)
            self.debug(traceback.format_exc())
            return
        # Set the GUI entries to these values (let the user Save after)
        newParList = tmpObj.getParList()
        try:
            self.setAllEntriesFromParList(newParList, updateModel=True)
            # go ahead and updateModel, even though it will take longer,
            # we need it updated for the copy of the dict we make below
        except editpar.UnfoundParamError as pe:
            showwarning(message=str(pe), title="Error in "+os.path.basename(fname))
        # trip any triggers
        self.checkAllTriggers('fopen')
        # This new fname is our current context
        self.updateTitle(fname)
        self._taskParsObj.filename = fname # !! maybe try setCurrentContext() ?
        self.freshenFocus()
        self.showStatus("Loaded values from: "+fname, keep=2)
        # Since we are in a new context (and have made no changes yet), make
        # a copy so we know what the last state was.
        # The dict() method returns a deep-copy dict of the keyvals.
        self._lastSavedState = self._taskParsObj.dict() | Load the parameter settings from a user-specified file.
def add_poisson(image, exp_time):
    """
    adds a Poisson (or Gaussian) distributed noise with mean given by surface brightness
    :param image: pixel values (photon counts per unit exposure time)
    :param exp_time: exposure time (scalar, or per-pixel array)
    :return: Poisson noise realization of input image
    """
    # NOTE(review): the string below is a stray duplicate of the docstring
    # (a no-op expression statement) -- candidate for removal.
    """
    adds a poison (or Gaussian) distributed noise with mean given by surface brightness
    """
    if isinstance(exp_time, int) or isinstance(exp_time, float):
        # Scalar exposure time: guard against non-positive values.
        if exp_time <= 0:
            exp_time = 1
    else:
        # Per-pixel exposure map: floor tiny exposures at 10% of the mean
        # to avoid blowing up sigma (mutates the caller's array in place).
        mean_exp_time = np.mean(exp_time)
        exp_time[exp_time < mean_exp_time/10] = mean_exp_time/10
    sigma = np.sqrt(np.abs(image)/exp_time) # Gaussian approximation for Poisson distribution, normalized to exposure time
    nx, ny = np.shape(image)
    poisson = np.random.randn(nx, ny) * sigma
    return poisson | adds a poison (or Gaussian) distributed noise with mean given by surface brightness
:param image: pixel values (photon counts per unit exposure time)
:param exp_time: exposure time
:return: Poisson noise realization of input image |
def ParseZeitgeistEventRow(
      self, parser_mediator, query, row, **unused_kwargs):
    """Parses a zeitgeist event row.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = ZeitgeistActivityEventData()
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri')
    timestamp = self._GetRowValue(query_hash, row, 'timestamp')
    # Zeitgeist timestamps are interpreted as Java time, that is
    # milliseconds since the epoch (per dfdatetime JavaTime).
    date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
    parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a zeitgeist event row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. |
def _parse_key(stream):
    """Parse a key from the stream, consuming up to and including '='.

    :arg stream: character stream supporting ``advance_past_chars``.
    :returns: the parsed key (string), without the '=' delimiter.
    """
    logger.debug("parsing key")
    # Everything before the '=' is the key; the '=' itself is consumed.
    key = stream.advance_past_chars(["="])
    logger.debug("parsed key:")
    logger.debug("%s", fmt_green(key))
    return key | Parse key, value combination
returns :
Parsed key (string) |
def setShapeClass(self, vehID, clazz):
        """setShapeClass(string, string) -> None

        Sets the shape class for this vehicle via the TraCI
        set-vehicle-variable command.
        """
        self._connection._sendStringCmd(
            tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_SHAPECLASS, vehID, clazz) | setShapeClass(string, string) -> None
Sets the shape class for this vehicle. |
def embed(self, title=''):
        """Start an IPython embed shell at the caller's frame.

        Calling embed won't do anything in a multithread context (returns
        False when embedding is disabled). The stack_depth will be found
        automatically by walking up the call stack until a frame outside
        this file is reached (at most 5 levels).
        """
        if self.embed_disabled:
            self.warning_log("Embed is disabled when runned from the grid runner because of the multithreading") # noqa
            return False
        from IPython.terminal.embed import InteractiveShellEmbed
        if BROME_CONFIG['runner']['play_sound_on_ipython_embed']:
            say(BROME_CONFIG['runner']['sound_on_ipython_embed'])
        ipshell = InteractiveShellEmbed(banner1=title)
        frame = currentframe()
        stack_depth = 1
        for i in range(5):
            frame = frame.f_back
            stack_depth += 1
            # NOTE(review): this substring test checks whether the frame's
            # filename is NOT contained in __file__; the reversed operand
            # order looks suspicious -- confirm the intended comparison.
            if frame.f_code.co_filename not in __file__:
                break
        msg = 'Stopped at %s and line %s;' % \
            (frame.f_code.co_filename, frame.f_lineno)
        ipshell(msg, stack_depth=stack_depth) | Start an IPython embed
Calling embed won't do anything in a multithread context
The stack_depth will be found automatically |
def ack(self):
        """Acknowledge this message as being processed.

        This will remove the message from the queue.

        :raises MessageStateError: If the message has already been
            acknowledged/requeued/rejected.
        """
        if self.acknowledged:
            # Double-acking is a protocol error; report the current state.
            raise self.MessageStateError(
                "Message already acknowledged with state: %s" % self._state)
        self.backend.ack(self.delivery_tag)
        self._state = "ACK" | Acknowledge this message as being processed.,
This will remove the message from the queue.
:raises MessageStateError: If the message has already been
acknowledged/requeued/rejected. |
def _merge_variables(new, cur):
    """Add any new variables to the world representation in cur.

    Replaces any variables adjusted by previous steps: a variable in `cur`
    whose base id matches one in `new` is replaced by the `new` version
    (order of `cur` is preserved); any remaining `new` variables are
    appended at the end.

    :arg new: list of variable dicts, each with an "id" key.
    :arg cur: current list of variable dicts.
    :returns: merged list of variable dicts.
    """
    new_added = set([])
    out = []
    for cur_var in cur:
        updated = False
        for new_var in new:
            # Match on the base id so step-suffixed ids still line up.
            if get_base_id(new_var["id"]) == get_base_id(cur_var["id"]):
                out.append(new_var)
                new_added.add(new_var["id"])
                updated = True
                break
        if not updated:
            out.append(cur_var)
    # Append genuinely new variables that replaced nothing.
    for new_var in new:
        if new_var["id"] not in new_added:
            out.append(new_var)
    return out | Add any new variables to the world representation in cur.
Replaces any variables adjusted by previous steps. |
def create_entity_type(project_id, display_name, kind):
    """Create an entity type with the given display name.

    :arg project_id: Dialogflow project id used to build the agent path.
    :arg display_name: display name for the new entity type.
    :arg kind: entity type kind (a dialogflow EntityType.Kind value).
    """
    import dialogflow_v2 as dialogflow
    entity_types_client = dialogflow.EntityTypesClient()
    parent = entity_types_client.project_agent_path(project_id)
    entity_type = dialogflow.types.EntityType(
        display_name=display_name, kind=kind)
    response = entity_types_client.create_entity_type(parent, entity_type)
    print('Entity type created: \n{}'.format(response)) | Create an entity type with the given display name.
def coords_by_cutoff(self, cutoff=0.80):
        """ Returns fitted coordinates in as many dimensions as are needed to
        explain a given amount of variance (specified in the cutoff).

        Returns (coords_matrix, cve) where cve is the cumulative variance
        actually explained. Note: raises IndexError if no dimension count
        reaches the cutoff (self.cve never >= cutoff).
        """
        # First index where cumulative variance explained meets the cutoff.
        i = np.where(self.cve >= cutoff)[0][0]
        coords_matrix = self.vecs[:, :i + 1]
        return coords_matrix, self.cve[i] | Returns fitted coordinates in as many dimensions as are needed to
explain a given amount of variance (specified in the cutoff) |
def is_integer(value, min=None, max=None):
    """
    A check that tests that a given value is an integer (int, or long)
    and optionally, between bounds. A negative value is accepted, while
    a float will fail.
    If the value is a string, then the conversion is done - if possible.
    Otherwise a VdtError is raised.
    >>> vtor = Validator()
    >>> vtor.check('integer', '-1')
    -1
    >>> vtor.check('integer', '0')
    0
    >>> vtor.check('integer', 9)
    9
    >>> vtor.check('integer', 'a') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "a" is of the wrong type.
    >>> vtor.check('integer', '2.2') # doctest: +SKIP
    Traceback (most recent call last):
    VdtTypeError: the value "2.2" is of the wrong type.
    >>> vtor.check('integer(10)', '20')
    20
    >>> vtor.check('integer(max=20)', '15')
    15
    >>> vtor.check('integer(10)', '9') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooSmallError: the value "9" is too small.
    >>> vtor.check('integer(10)', 9) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooSmallError: the value "9" is too small.
    >>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooBigError: the value "35" is too big.
    >>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueTooBigError: the value "35" is too big.
    >>> vtor.check('integer(0, 9)', False)
    0
    """
    # Normalize the optional bounds into numbers (or None).
    (min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
    if not isinstance(value, int_or_string_types):
        raise VdtTypeError(value)
    if isinstance(value, string_types):
        # if it's a string - does it represent an integer ?
        try:
            value = int(value)
        except ValueError:
            raise VdtTypeError(value)
    # Bounds are inclusive: value must satisfy min_val <= value <= max_val.
    if (min_val is not None) and (value < min_val):
        raise VdtValueTooSmallError(value)
    if (max_val is not None) and (value > max_val):
        raise VdtValueTooBigError(value)
    return value | A check that tests that a given value is an integer (int, or long)
and optionally, between bounds. A negative value is accepted, while
a float will fail.
If the value is a string, then the conversion is done - if possible.
Otherwise a VdtError is raised.
>>> vtor = Validator()
>>> vtor.check('integer', '-1')
-1
>>> vtor.check('integer', '0')
0
>>> vtor.check('integer', 9)
9
>>> vtor.check('integer', 'a') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "a" is of the wrong type.
>>> vtor.check('integer', '2.2') # doctest: +SKIP
Traceback (most recent call last):
VdtTypeError: the value "2.2" is of the wrong type.
>>> vtor.check('integer(10)', '20')
20
>>> vtor.check('integer(max=20)', '15')
15
>>> vtor.check('integer(10)', '9') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(10)', 9) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooSmallError: the value "9" is too small.
>>> vtor.check('integer(max=20)', '35') # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(max=20)', 35) # doctest: +SKIP
Traceback (most recent call last):
VdtValueTooBigError: the value "35" is too big.
>>> vtor.check('integer(0, 9)', False)
0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.