code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def load(fp: Union[TextIO, str], load_module: types.ModuleType, **kwargs):
    """ Convert a file name or file-like object containing stringified JSON into a JSGObject
    :param fp: file name or file-like object to deserialize
    :param load_module: module that contains declarations for types
    :param kwargs: arguments see: json.load for details
    :return: JSGObject representing the json string
    """
    if isinstance(fp, str):
        # JSON text is UTF-8 by specification (RFC 8259); do not rely on the
        # platform default encoding when opening by file name.
        with open(fp, encoding='utf-8') as f:
            return loads(f.read(), load_module, **kwargs)
    # Already a file-like object: read it as-is.
    return loads(fp.read(), load_module, **kwargs)
def reassign_proficiency_to_objective_bank(self, objective_id, from_objective_bank_id, to_objective_bank_id):
    """Moves an ``Objective`` from one ``ObjectiveBank`` to another.
    Mappings to other ``ObjectiveBanks`` are unaffected.
    arg:    objective_id (osid.id.Id): the ``Id`` of the
            ``Objective``
    arg:    from_objective_bank_id (osid.id.Id): the ``Id`` of the
            current ``ObjectiveBank``
    arg:    to_objective_bank_id (osid.id.Id): the ``Id`` of the
            destination ``ObjectiveBank``
    raise:  NotFound - ``objective_id, from_objective_bank_id,`` or
            ``to_objective_bank_id`` not found or ``objective_id``
            not mapped to ``from_objective_bank_id``
    raise:  NullArgument - ``objective_id, from_objective_bank_id,``
            or ``to_objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.reassign_resource_to_bin
    # Assign to the destination first, so the objective is never left
    # unmapped: if the assign fails, nothing has changed yet.
    self.assign_objective_to_objective_bank(objective_id, to_objective_bank_id)
    try:
        self.unassign_objective_from_objective_bank(objective_id, from_objective_bank_id)
    except: # something went wrong, roll back assignment to to_objective_bank_id
        # Bare except is intentional: ANY failure (including interrupts)
        # must undo the new assignment before re-raising.
        self.unassign_objective_from_objective_bank(objective_id, to_objective_bank_id)
        raise
def map(cls, x, palette, limits, na_value=None):
    """
    Map values onto a discrete palette.

    Parameters
    ----------
    x : array_like
        Continuous values to scale.
    palette : callable ``f(n)``
        Palette-generating function, called with the number of limits.
    limits : array_like
        Discrete levels that ``x`` is matched against.
    na_value : object
        Value to use for missing values.

    Returns
    -------
    out : array_like
        Values mapped onto a palette.
    """
    n_levels = len(limits)
    mapped = palette(n_levels)[match(x, limits)]
    try:
        # ndarray-style palettes accept boolean-mask assignment
        mapped[pd.isnull(x)] = na_value
    except TypeError:
        # plain sequences do not; rebuild element-wise instead
        mapped = [na_value if pd.isnull(v) else v for v in mapped]
    return mapped
def _add_spin_magnitudes(self, structure):
    """
    Replaces Spin.up/Spin.down with spin magnitudes specified
    by mag_species_spin.
    :param structure: structure whose site species carry a 'spin'
        property giving the spin sign; modified in place.
    :return: the same structure, with each 'spin' rescaled to the
        magnitude configured in ``self.mag_species_spin``.
    """
    for idx, site in enumerate(structure):
        # Only species that expose the private _properties dict can carry a spin.
        if getattr(site.specie, '_properties', None):
            spin = site.specie._properties.get('spin', None)
            # Spin.up/Spin.down are +/-1, so int(spin) is the sign
            # (0 when spin is absent or falsy).
            sign = int(spin) if spin else 0
            if spin:
                new_properties = site.specie._properties.copy()
                # this very hacky bit of code only works because we know
                # that on disordered sites in this class, all species are the same
                # but have different spins, and this is comma-delimited
                sp = str(site.specie).split(",")[0]
                new_properties.update({
                    'spin': sign * self.mag_species_spin.get(sp, 0)
                })
                new_specie = Specie(site.specie.symbol,
                                    getattr(site.specie, 'oxi_state', None),
                                    new_properties)
                # Replace in place, preserving the site-level properties.
                structure.replace(idx, new_specie,
                                  properties=site.properties)
    logger.debug('Structure with spin magnitudes:\n{}'.format(str(structure)))
    return structure
def _updateConstructorAndMembers(self):
    """We overwrite constructor and accessors every time because the constructor might have to consume all
    members even if their decorator is below the "synthesizeConstructor" decorator and it also might need to update
    the getters and setters because the naming convention has changed.
    """
    syntheticMetaData = self._syntheticMetaData()
    # Rebuild __init__ from the original constructor plus every synthetic member.
    constructor = self._constructorFactory.makeConstructor(syntheticMetaData.originalConstructor(),
                                                           syntheticMetaData.syntheticMemberList(),
                                                           syntheticMetaData.doesConsumeArguments())
    self._class.__init__ = constructor
    # Re-apply each member so its accessors follow the current naming convention.
    for syntheticMember in syntheticMetaData.syntheticMemberList():
        syntheticMember.apply(self._class,
                              syntheticMetaData.originalMemberNameList(),
                              syntheticMetaData.namingConvention())
    if syntheticMetaData.hasEqualityGeneration():
        # Derive __eq__/__ne__/__hash__ from the synthetic member list,
        # wrapping any comparison functions the class originally defined.
        eq = self._comparisonFactory.makeEqualFunction(syntheticMetaData.originalEqualFunction(),
                                                       syntheticMetaData.syntheticMemberList())
        ne = self._comparisonFactory.makeNotEqualFunction(syntheticMetaData.originalNotEqualFunction(),
                                                          syntheticMetaData.syntheticMemberList())
        hashFunc = self._comparisonFactory.makeHashFunction(syntheticMetaData.originalHashFunction(),
                                                            syntheticMetaData.syntheticMemberList())
        self._class.__eq__ = eq
        self._class.__ne__ = ne
        self._class.__hash__ = hashFunc
def space_new(args):
    """Create a new workspace.

    Uses ``args.project``, ``args.workspace`` and ``args.authdomain``.
    Returns 0 on success.
    """
    response = fapi.create_workspace(args.project, args.workspace,
                                     args.authdomain, dict())
    # Workspace creation must answer 201 Created.
    fapi._check_response_code(response, 201)
    if fcconfig.verbosity:
        eprint(response.content)
    return 0
def _set_cfm_detail(self, v, load=False):
    """
    Setter method for cfm_detail, mapped from YANG variable /cfm_state/cfm_detail (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cfm_detail is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cfm_detail() directly.
    YANG Description: CFM Details
    """
    # Unwrap values that carry their underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated container class; raises on type mismatch.
        t = YANGDynClass(v,base=cfm_detail.cfm_detail, is_container='container', presence=False, yang_name="cfm-detail", rest_name="cfm-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-cfm-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cfm_detail must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=cfm_detail.cfm_detail, is_container='container', presence=False, yang_name="cfm-detail", rest_name="cfm-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-cfm-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=False)""",
        })
    self.__cfm_detail = t
    # Notify the parent object, when change tracking is enabled, that this node was set.
    if hasattr(self, '_set'):
        self._set()
def fire(self, exclude=None, delay=True):
    """Notify everyone watching the event.
    We are explicit about sending notifications; we don't just key off
    creation signals, because the receiver of a ``post_save`` signal has no
    idea what just changed, so it doesn't know which notifications to send.
    Also, we could easily send mail accidentally: for instance, during
    tests. If we want implicit event firing, we can always register a
    signal handler that calls :meth:`fire()`.
    :arg exclude: If a saved user is passed in, that user will not be
        notified, though anonymous notifications having the same email
        address may still be sent. A sequence of users may also be passed in.
    :arg delay: If True (default), the event is handled asynchronously with
        Celery. This requires the pickle task serializer, which is no longer
        the default starting in Celery 4.0. If False, the event is processed
        immediately.
    """
    if not delay:
        # Synchronous path: run the task inline.
        self._fire_task(self, exclude=exclude)
        return
    # Async path via Celery. Tasks don't receive the `self` arg
    # implicitly, so the event itself is passed as the first argument.
    self._fire_task.apply_async(
        args=(self,),
        kwargs={'exclude': exclude},
        serializer='pickle')
def from_conll(this_class, stream):
    """Build a Sentence from *stream*, an iterable of strings in
    CoNLL-X format (one token per line). If the stream holds several
    sentences, only the first one is returned; a blank line after at
    least one token terminates it."""
    tokens = this_class()
    for raw_line in iter(stream):
        raw_line = raw_line.strip()
        if not raw_line:
            if tokens:
                # Blank separator after content: sentence complete.
                return tokens
            # Leading blank lines are ignored.
            continue
        tokens.append(Token.from_conll(raw_line))
    return tokens
def await_transform_exists(cli, transform_path, does_exist=DEFAULT_TRANSFORM_EXISTS, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
    """
    Waits for a single transform to exist (or cease to exist), per does_exist.
    :param cli: client used to exchange messages with the remote side
    :param transform_path: path of the transform to await
    :param does_exist: Whether or not to await for exist state (True | False)
    :param timeout_seconds: How long until this returns with failure
    :return: bool
    """
    payload = {
        "transform_paths": [transform_path],
        "do_exist": does_exist,
        "match_mode": "All",
        "timeout": timeout_seconds,
    }
    cli.send_message(message.Message("await.unity.transform.exists", payload))
    reply = cli.read_message()
    # Raises when the reply signals an error.
    verify_response(reply)
    return bool(reply['payload']['success'])
def find_region_end(self, lines):
    """Find the end of the region started with start and end markers

    Returns a triple of line indices into ``lines``:
    (last line of this cell, first line of the next cell, explicit end
    marker found) -- NOTE(review): inferred from the return statements
    below; confirm against callers.
    """
    # Cell type comes from the cell metadata when present; defaults to code.
    if self.metadata and 'cell_type' in self.metadata:
        self.cell_type = self.metadata.pop('cell_type')
    else:
        self.cell_type = 'code'
    parser = StringParser(self.language or self.default_language)
    for i, line in enumerate(lines):
        # skip cell header
        if self.metadata is not None and i == 0:
            continue
        # Lines inside a multi-line string can never be markers.
        if parser.is_quoted():
            parser.read_line(line)
            continue
        parser.read_line(line)
        # New code region
        # Simple code pattern in LightScripts must be preceded with a blank line
        if self.start_code_re.match(line) or (
                self.simple_start_code_re and self.simple_start_code_re.match(line) and
                (self.cell_marker_start or i == 0 or _BLANK_LINE.match(lines[i - 1]))):
            if self.explicit_end_marker_required:
                # Metadata here was conditioned on finding an explicit end marker
                # before the next start marker. So we dismiss it.
                self.metadata = None
                self.language = None
            # Trim up to two trailing blank lines off the current cell.
            if i > 0 and _BLANK_LINE.match(lines[i - 1]):
                if i > 1 and _BLANK_LINE.match(lines[i - 2]):
                    return i - 2, i, False
                return i - 1, i, False
            return i, i, False
        if not self.ignore_end_marker and self.end_code_re:
            # An explicit end marker closes the cell; next cell starts after it.
            if self.end_code_re.match(line):
                return i, i + 1, True
        elif _BLANK_LINE.match(line):
            # Without end markers, a blank line ends the region unless the
            # following code is indented (still part of the same statement).
            if not next_code_is_indented(lines[i:]):
                if i > 0:
                    return i, i + 1, False
                if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
                    return 1, 1, False
                return 1, 2, False
    # No terminator found: the region runs to the end of the input.
    return len(lines), len(lines), False
def days_to_liquidate_positions(positions, market_data,
                                max_bar_consumption=0.2,
                                capital_base=1e6,
                                mean_volume_window=5):
    """
    Compute, per day and per symbol, how many days would have been needed
    to fully liquidate each position, given the trailing n-day mean daily
    dollar volume and a cap on the share of a daily bar we may consume.
    The analysis uses portfolio allocations times a fixed capital base
    (rather than the dollar values in ``positions``) so that compounding
    does not distort the result: the net liquidation value is assumed to
    stay constant at ``capital_base``.
    Parameters
    ----------
    positions: pd.DataFrame
        Contains daily position values including cash
        - See full explanation in tears.create_full_tear_sheet
    market_data : pd.Panel
        Panel with items axis of 'price' and 'volume' DataFrames.
        The major and minor axes should match those of the
        the passed positions DataFrame (same dates and symbols).
    max_bar_consumption : float
        Max proportion of a daily bar that can be consumed in the
        process of liquidating a position.
    capital_base : integer
        Capital base multiplied by portfolio allocation to compute
        position value that needs liquidating.
    mean_volume_window : float
        Trailing window to use in mean volume calculation.
    Returns
    -------
    days_to_liquidate : pd.DataFrame
        Number of days required to fully liquidate daily positions.
        Datetime index, symbols as columns.
    """
    dollar_volume = market_data['volume'] * market_data['price']
    # Trailing mean, shifted so each day only sees prior days' volume;
    # zero-volume days become NaN to avoid division by zero.
    trailing_mean_dv = dollar_volume.rolling(window=mean_volume_window,
                                             center=False).mean().shift()
    trailing_mean_dv = trailing_mean_dv.replace(0, np.nan)
    alloc = pos.get_percent_alloc(positions)
    alloc = alloc.drop('cash', axis=1)
    days_to_liquidate = (alloc * capital_base) / \
        (max_bar_consumption * trailing_mean_dv)
    # Drop the warm-up rows where the rolling mean is undefined.
    return days_to_liquidate.iloc[mean_volume_window:]
def writeCmdMsg(self, msg):
    """ Internal method to set the command result string.
    Args:
        msg (str): Message built during command.
    """
    # Log with the current meter context so the message can be traced,
    # then remember it as the last command result.
    ekm_log("(writeCmdMsg | " + self.getContext() + ") " + msg)
    self.m_command_msg = msg
def listify(val, return_type=tuple):
    """
    Coerce *val* into a ``return_type`` container: ``None`` becomes the
    empty container, an iterable is spread, and any other value is
    wrapped as a single element.

    Examples:
        >>> listify('abc', return_type=list)
        ['abc']
        >>> listify(None)
        ()
        >>> listify(False)
        (False,)
        >>> listify(('a', 'b', 'c'), return_type=list)
        ['a', 'b', 'c']
    """
    # TODO: flatlistify((1, 2, 3), 4, (5, 6, 7))
    if val is None:
        return return_type()
    if isiterable(val):
        return return_type(val)
    return return_type((val,))
def original_query_sequence_length(self):
    """Similar to get_get_query_sequence_length, but it also includes
    hard clipped bases.
    If there is no cigar, then default to trying the sequence.
    :return: the length of the query before any clipping
    :rtype: int
    """
    if not self.is_aligned() or not self.entries.cigar:
        return self.query_sequence_length  # take the naive approach
    # We have an alignment, so sum the CIGAR operations that consume
    # query bases (M, I, S, =, X) plus hard clips (H). Set membership
    # replaces the original per-element re.match, which was both slower
    # and would silently accept any multi-char op starting with a
    # matching letter.
    query_ops = {'H', 'M', 'I', 'S', '=', 'X'}
    return sum(length for length, op in self.cigar_array if op in query_ops)
def quote_by_instruments(cls, client, ids):
    """Build the instrument URL for each id in *ids*, then fetch and
    return the quotes for those URLs."""
    urls = ["https://api.robinhood.com/instruments/{}/".format(instrument_id)
            for instrument_id in ids]
    return cls.quotes_by_instrument_urls(client, urls)
def user_absent(name, channel=14, **kwargs):
    '''
    Remove user
    Delete all user (uid) records having the matching name.
    name
        string name of user to delete
    channel
        channel to remove user access from defaults to 14 for auto.
    kwargs
        - api_host=localhost
        - api_user=admin
        - api_pass=
        - api_port=623
        - api_kg=None
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    uids = __salt__['ipmi.get_name_uids'](name, channel, **kwargs)
    # Nothing to do when no uid carries this name.
    if not uids:
        ret['result'] = True
        ret['comment'] = 'user already absent'
        return ret
    # Dry run: report which uids would be removed.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'would delete user(s)'
        ret['changes'] = {'delete': uids}
        return ret
    for uid in uids:
        __salt__['ipmi.delete_user'](uid, channel, **kwargs)
    ret['comment'] = 'user(s) removed'
    ret['changes'] = {'old': uids, 'new': 'None'}
    return ret
def show_lowstate(**kwargs):
    '''
    List out the low data that will be applied to this minion
    CLI Example:
    .. code-block:: bash
        salt '*' state.show_lowstate
    '''
    # The SSH highstate needs the minion grains present in opts.
    __opts__['grains'] = __grains__
    sls_opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    highstate = salt.client.ssh.state.SSHHighState(
        sls_opts,
        __pillar__,
        __salt__,
        __context__['fileclient'])
    highstate.push_active()
    low_chunks = highstate.compile_low_chunks()
    # Strip non-serializable slsmod references before returning.
    _cleanup_slsmod_low_data(low_chunks)
    return low_chunks
def _mirror_penalized(self, f_values, idx):
"""obsolete and subject to removal (TODO),
return modified f-values such that for each mirror one becomes worst.
This function is useless when selective mirroring is applied with no
more than (lambda-mu)/2 solutions.
Mirrors are leading and trailing values in ``f_values``.
"""
assert len(f_values) >= 2 * len(idx)
m = np.max(np.abs(f_values))
for i in len(idx):
if f_values[idx[i]] > f_values[-1 - i]:
f_values[idx[i]] += m
else:
f_values[-1 - i] += m
return f_values | obsolete and subject to removal (TODO),
return modified f-values such that for each mirror one becomes worst.
This function is useless when selective mirroring is applied with no
more than (lambda-mu)/2 solutions.
Mirrors are leading and trailing values in ``f_values``. |
def file_like(name):
    """Decide whether *name* looks like a file path rather than a bare
    module name: it exists on disk, or has a directory component, or
    ends in .py, or its stem is not a legal python identifier.
    Returns the first truthy test result (or the final boolean).
    """
    dir_part = os.path.dirname(name)
    stem = os.path.splitext(name)[0]
    return (os.path.exists(name)
            or dir_part
            or name.endswith('.py')
            or not ident_re.match(stem))
def confirm(text, default=False, abort=False, prompt_suffix=': ',
            show_default=True, err=False):
    """Prompts for confirmation (yes/no question).
    If the user aborts the input by sending a interrupt signal this
    function will catch it and raise a :exc:`Abort` exception.
    .. versionadded:: 4.0
       Added the `err` parameter.
    :param text: the question to ask.
    :param default: the default for the prompt.
    :param abort: if this is set to `True` a negative answer aborts the
                  exception by raising :exc:`Abort`.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    prompt = _build_prompt(text, prompt_suffix, show_default,
                           'Y/n' if default else 'y/N')
    while True:
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(prompt, nl=False, err=err)
            answer = visible_prompt_func('').lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort()
        if answer in ('y', 'yes'):
            result = True
        elif answer in ('n', 'no'):
            result = False
        elif answer == '':
            # Bare Enter picks the default.
            result = default
        else:
            echo('Error: invalid input', err=err)
            continue
        break
    if abort and not result:
        raise Abort()
    return result
def get_vulnerability(
    source,
    sink,
    triggers,
    lattice,
    cfg,
    interactive,
    blackbox_mapping
):
    """Get vulnerability between source and sink if it exists.
    Uses triggers to find sanitisers.
    Note: When a secondary node is in_constraint with the sink
          but not the source, the secondary is a save_N_LHS
          node made in process_function in expr_visitor.
    Args:
        source(TriggerNode): TriggerNode of the source.
        sink(TriggerNode): TriggerNode of the sink.
        triggers(Triggers): Triggers of the CFG.
        lattice(Lattice): the lattice we're analysing.
        cfg(CFG): .blackbox_assignments used in is_unknown, .nodes used in build_def_use_chain
        interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
        blackbox_mapping(dict): A map of blackbox functions containing whether or not they propagate taint.
    Returns:
        A Vulnerability if it exists, else None
    """
    # Secondary nodes of the source that reach the sink, most recent first,
    # plus the source node itself.
    nodes_in_constraint = [
        secondary
        for secondary in reversed(source.secondary_nodes)
        if lattice.in_constraint(
            secondary,
            sink.cfg_node
        )
    ]
    nodes_in_constraint.append(source.cfg_node)
    # Only consider sink arguments through which taint can propagate.
    if sink.trigger.all_arguments_propagate_taint:
        sink_args = get_sink_args(sink.cfg_node)
    else:
        sink_args = get_sink_args_which_propagate(sink, sink.cfg_node.ast_node)
    tainted_node_in_sink_arg = get_tainted_node_in_sink_args(
        sink_args,
        nodes_in_constraint,
    )
    if tainted_node_in_sink_arg:
        vuln_deets = {
            'source': source.cfg_node,
            'source_trigger_word': source.trigger_word,
            'sink': sink.cfg_node,
            'sink_trigger_word': sink.trigger_word
        }
        # Collect the sink's sanitisers: assignments sanitise directly,
        # an IfNode is only a *potential* sanitiser (guarded flow).
        sanitiser_nodes = set()
        potential_sanitiser = None
        if sink.sanitisers:
            for sanitiser in sink.sanitisers:
                for cfg_node in triggers.sanitiser_dict[sanitiser]:
                    if isinstance(cfg_node, AssignmentNode):
                        sanitiser_nodes.add(cfg_node)
                    elif isinstance(cfg_node, IfNode):
                        potential_sanitiser = cfg_node
        def_use = build_def_use_chain(
            cfg.nodes,
            lattice
        )
        # Walk every def-use chain from source to sink; the first chain
        # that is not judged FALSE yields the vulnerability.
        for chain in get_vulnerability_chains(
            source.cfg_node,
            sink.cfg_node,
            def_use
        ):
            vulnerability_type, interactive = how_vulnerable(
                chain,
                blackbox_mapping,
                sanitiser_nodes,
                potential_sanitiser,
                cfg.blackbox_assignments,
                interactive,
                vuln_deets
            )
            if vulnerability_type == VulnerabilityType.FALSE:
                continue
            vuln_deets['reassignment_nodes'] = chain
            return vuln_factory(vulnerability_type)(**vuln_deets), interactive
    return None, interactive
def _init_trace_logging(self, app):
    """
    Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
    set in the Flask config.
    Args:
        app (flask.Flask). the Flask application for which to initialize the extension.
    """
    if app.config.get(CONF_DISABLE_TRACE_LOGGING, False):
        # Trace logging explicitly disabled in the app configuration.
        return
    # Forward records from the Flask app logger to Application Insights.
    self._trace_log_handler = LoggingHandler(
        self._key, telemetry_channel=self._channel)
    app.logger.addHandler(self._trace_log_handler)
def present(
        name,
        user=None,
        fingerprint=None,
        key=None,
        port=None,
        enc=None,
        config=None,
        hash_known_hosts=True,
        timeout=5,
        fingerprint_hash_type=None):
    '''
    Verifies that the specified host is known by the specified user
    On many systems, specifically those running with openssh 4 or older, the
    ``enc`` option must be set, only openssh 5 and above can detect the key
    type.
    name
        The name of the remote host (e.g. "github.com")
        Note that only a single hostname is supported, if foo.example.com and
        bar.example.com have the same host you will need two separate Salt
        States to represent them.
    user
        The user who owns the ssh authorized keys file to modify
    fingerprint
        The fingerprint of the key which must be present in the known_hosts
        file (optional if key specified)
    key
        The public key which must be present in the known_hosts file
        (optional if fingerprint specified)
    port
        optional parameter, port which will be used to when requesting the
        public key from the remote host, defaults to port 22.
    enc
        Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
        or ssh-dss
    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.
    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.
    timeout : int
        Set the timeout for connection attempts. If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable. Default is 5 seconds.
        .. versionadded:: 2016.3.0
    fingerprint_hash_type
        The public key fingerprint hash type that the public key fingerprint
        was originally hashed with. This defaults to ``sha256`` if not specified.
        .. versionadded:: 2016.11.4
        .. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``
    '''
    ret = {'name': name,
           'changes': {},
           'result': None if __opts__['test'] else True,
           'comment': ''}
    # Default known_hosts location: system-wide file when no user is given,
    # otherwise a path relative to the user's home directory.
    if not user:
        config = config or '/etc/ssh/ssh_known_hosts'
    else:
        config = config or '.ssh/known_hosts'
    if not user and not os.path.isabs(config):
        comment = 'If not specifying a "user", specify an absolute "config".'
        ret['result'] = False
        return dict(ret, comment=comment)
    if __opts__['test']:
        # Dry run: validate the arguments, then report what would change
        # without touching the file.
        if key and fingerprint:
            comment = 'Specify either "key" or "fingerprint", not both.'
            ret['result'] = False
            return dict(ret, comment=comment)
        elif key and not enc:
            comment = 'Required argument "enc" if using "key" argument.'
            ret['result'] = False
            return dict(ret, comment=comment)
        try:
            result = __salt__['ssh.check_known_host'](user, name,
                                                      key=key,
                                                      fingerprint=fingerprint,
                                                      config=config,
                                                      port=port,
                                                      fingerprint_hash_type=fingerprint_hash_type)
        except CommandNotFoundError as err:
            ret['result'] = False
            ret['comment'] = 'ssh.check_known_host error: {0}'.format(err)
            return ret
        if result == 'exists':
            comment = 'Host {0} is already in {1}'.format(name, config)
            ret['result'] = True
            return dict(ret, comment=comment)
        elif result == 'add':
            comment = 'Key for {0} is set to be added to {1}'.format(name,
                                                                     config)
            return dict(ret, comment=comment)
        else:  # 'update'
            comment = 'Key for {0} is set to be updated in {1}'.format(name,
                                                                       config)
            return dict(ret, comment=comment)
    # Not a test run: actually write/refresh the known_hosts entry.
    result = __salt__['ssh.set_known_host'](
        user=user,
        hostname=name,
        fingerprint=fingerprint,
        key=key,
        port=port,
        enc=enc,
        config=config,
        hash_known_hosts=hash_known_hosts,
        timeout=timeout,
        fingerprint_hash_type=fingerprint_hash_type)
    if result['status'] == 'exists':
        return dict(ret,
                    comment='{0} already exists in {1}'.format(name, config))
    elif result['status'] == 'error':
        return dict(ret, result=False, comment=result['error'])
    else:  # 'updated'
        if key:
            new_key = result['new'][0]['key']
            return dict(ret,
                        changes={'old': result['old'], 'new': result['new']},
                        comment='{0}\'s key saved to {1} (key: {2})'.format(
                            name, config, new_key))
        else:
            fingerprint = result['new'][0]['fingerprint']
            return dict(ret,
                        changes={'old': result['old'], 'new': result['new']},
                        comment='{0}\'s key saved to {1} (fingerprint: {2})'.format(
                            name, config, fingerprint))
def get_subdomain_DID_info(fqn, db_path=None, zonefiles_dir=None):
    """Look up the DID info for a subdomain.

    :param fqn: fully-qualified subdomain name
    :param db_path: path to the subdomain DB (defaults to the configured one)
    :param zonefiles_dir: zonefiles directory (defaults to the configured one)
    :return: the DID info, or None if subdomains are disabled or the
        subdomain does not exist
    """
    blockstack_opts = get_blockstack_opts()
    if not is_subdomains_enabled(blockstack_opts):
        log.warn("Subdomain support is disabled")
        return None

    db_path = blockstack_opts['subdomaindb_path'] if db_path is None else db_path
    zonefiles_dir = blockstack_opts['zonefiles'] if zonefiles_dir is None else zonefiles_dir

    db = SubdomainDB(db_path, zonefiles_dir)

    # Confirm the subdomain exists before asking for its DID info.
    try:
        db.get_subdomain_entry(fqn)
    except SubdomainNotFound:
        log.warn("No such subdomain: {}".format(fqn))
        return None

    try:
        return db.get_subdomain_DID_info(fqn)
    except SubdomainNotFound:
        return None
def predict(self, data):
    """Predict values for ``data`` using the fitted model.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain every column referenced by the right-hand side of the
        `model_expression`.

    Returns
    -------
    result : pandas.Series
        Predicted values as a pandas Series, indexed like `data` after
        applying filters.
    """
    self.assert_fitted()
    message = 'predicting model {}'.format(self.name)
    with log_start_finish(message, logger):
        return predict(
            data, self.predict_filters, self.model_fit, self.ytransform)
def _copy_from(self, rhs):
"""Copy all data from rhs into this instance, handles usage count"""
self._manager = rhs._manager
self._rlist = type(rhs._rlist)(rhs._rlist)
self._region = rhs._region
self._ofs = rhs._ofs
self._size = rhs._size
for region in self._rlist:
region.increment_client_count()
if self._region is not None:
self._region.increment_client_count() | Copy all data from rhs into this instance, handles usage count |
def OnCellFontSize(self, event):
    """Apply the font size carried by ``event`` to the selected cells."""
    grid = self.grid
    # Record the attribute change as a single undoable action.
    with undo.group(_("Font size")):
        grid.actions.set_attr("pointsize", event.size)
    grid.ForceRefresh()
    grid.update_attribute_toolbar()
    event.Skip()
def transpose_list(list_of_dicts):
    """Transpose a list of dicts into a dict of lists.

    :param list_of_dicts: dicts to transpose, as in the output from a parse call
    :return: dict mapping each key to the list of its values, in input order
    """
    transposed = {}
    for record in list_of_dicts:
        for key, value in record.items():
            transposed.setdefault(key, []).append(value)
    return transposed
def run(cmd, *args, **kwargs):
    """Echo a command before running it. Defaults to repo as cwd.

    :param cmd: command as a list of arguments, or a single string which is
        split on whitespace
    :return: return value of ``check_call``
    :raises CalledProcessError: if the command exits non-zero
    """
    # Normalize to a list *before* logging: calling list2cmdline() on a plain
    # string treats every character as a separate argument, garbling the echo.
    if not isinstance(cmd, list):
        cmd = cmd.split()
    log.info('> ' + list2cmdline(cmd))
    kwargs.setdefault('cwd', here)
    kwargs.setdefault('shell', sys.platform == 'win32')
    return check_call(cmd, *args, **kwargs)
def _sig(self, name, dtype=BIT, defVal=None):
    """Create a signal in this unit.

    For an HStruct dtype a container object is built with one signal per
    named field; otherwise a single signal is created in the unit context.
    """
    if not isinstance(dtype, HStruct):
        return self._ctx.sig(name, dtype=dtype, defVal=defVal)

    # Struct signals do not support default values.
    if defVal is not None:
        raise NotImplementedError()
    container = dtype.fromPy(None)
    for field in dtype.fields:
        if field.name is None:
            continue
        sub_signal = self._sig("%s_%s" % (name, field.name), field.dtype)
        setattr(container, field.name, sub_signal)
    return container
def run(data, samples, force, ipyclient):
    """
    Build the requested output files for samples that finished clustering.

    Assumes all requested samples have reached state 6 and that excluded
    samples were already removed. Creates the output directory, fills the
    HDF5 filter/SNP database, writes the .loci/.alleles outputs and stats,
    then produces any optional formats (including VCF) requested in params.
    """
    ## make sure the output directory exists
    data.dirs.outfiles = os.path.join(data.dirs.project, data.name + "_outfiles")
    if not os.path.exists(data.dirs.outfiles):
        os.mkdir(data.dirs.outfiles)

    ## make the snps/filters data base; fills the dups and inds filters
    ## and fills the splits locations
    data.database = os.path.join(data.dirs.outfiles, data.name + ".hdf5")
    init_arrays(data)

    ## apply filters to supercatg and superhdf5 with the selected samples
    ## and fill the filter and edge arrays
    filter_all_clusters(data, samples, ipyclient)

    ## the filled h5 database now has everything needed; write the default
    ## .loci output and build a stats file
    data.outfiles.loci = os.path.join(data.dirs.outfiles, data.name + ".loci")
    data.outfiles.alleles = os.path.join(data.dirs.outfiles, data.name + ".alleles.loci")
    make_loci_and_stats(data, samples, ipyclient)

    ## OPTIONAL OUTPUTS: vcf is handled separately from the other formats
    ## because it is big and parallelized
    output_formats = data.paramsdict["output_formats"]
    if ("v" in output_formats) or ("V" in output_formats):
        try:
            make_vcf(data, samples, ipyclient, full=("V" in output_formats))
        except IPyradWarningExit as inst:
            ## vcf building failed -- often simply a memory issue, so trap
            ## the exception and keep going so the other formats still run
            print(" Error building vcf. See ipyrad_log.txt for details.")
            LOGGER.error(inst)

    ## make other array-based formats; recalcs keeps and arrays
    make_outfiles(data, samples, output_formats, ipyclient)

    ## print friendly message
    shortpath = data.dirs.outfiles.replace(os.path.expanduser("~"), "~")
    print("{}Outfiles written to: {}\n".format(data._spacer, shortpath))
def convert_bam_to_bed(in_bam, out_file):
    """Convert a BAM file to BED format using the BEDTools ``bamToBed`` tool.

    :param in_bam: path to the input BAM file
    :param out_file: destination BED path
    :return: ``out_file``
    """
    bed_cmd = ["bamToBed", "-i", in_bam, "-tag", "NM"]
    with file_transaction(out_file) as tx_out_file, \
            open(tx_out_file, "w") as out_handle:
        subprocess.check_call(bed_cmd, stdout=out_handle)
    return out_file
def read(self, size=None):
    """Read a length of bytes. Return empty on EOF. If 'size' is omitted,
    return whole file.
    """
    if size is not None:
        return self.__sf.read(size)

    chunk_size = self.__class__.__block_size
    buf = bytearray()
    total = 0
    while True:
        chunk = self.__sf.read(chunk_size)
        buf.extend(chunk)
        total += len(chunk)
        if len(chunk) < chunk_size:
            # A short read signals EOF on the underlying stream.
            self.__log.debug("End of file.")
            break
    self.__log.debug("Read (%d) bytes for total-file." % (total))
    return buf
def probability_density(self, X):
    """Compute the copula density at the points in ``X``.

    Args:
        X: `np.ndarray` of (U, V) pairs.

    Returns:
        np.array: probability density
    """
    self.check_fit()

    U, V = self.split_matrix(X)

    # Degenerate case: theta == 0 reduces to the product U * V.
    if self.theta == 0:
        return U * V

    num = -self.theta * self._g(1) * (1 + self._g(U + V))
    den = (self._g(U) * self._g(V) + self._g(1)) ** 2
    return num / den
def _request_prepare(self, three_pc_key: Tuple[int, int],
                     recipients: List[str] = None,
                     stash_data: Optional[Tuple[str, str, str]] = None) -> bool:
    """Request a PREPARE for ``three_pc_key`` from the given recipients.

    The primary is excluded from the recipient set, since it does not
    send PREPAREs.
    """
    if recipients is None:
        recipients = self.node.nodestack.connecteds.copy()
    # Strip the ":port" suffix from the primary's name before discarding it.
    primary = self.primaryName[:self.primaryName.rfind(":")]
    recipients.discard(primary)
    return self._request_three_phase_msg(three_pc_key,
                                         self.requested_prepares,
                                         PREPARE,
                                         recipients,
                                         stash_data)
def hide_file(path):
    """
    Set the hidden attribute on a file or directory.

    From http://stackoverflow.com/questions/19622133/

    `path` must be text.

    :raises OSError: (WinError) if the Win32 call fails.
    """
    # Explicit import instead of the opaque __import__('ctypes.wintypes')
    # trick; Windows-only, so keep it local to the function.
    import ctypes.wintypes

    SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
    SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
    SetFileAttributes.restype = ctypes.wintypes.BOOL

    FILE_ATTRIBUTE_HIDDEN = 0x02

    ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
    if not ret:
        raise ctypes.WinError()
def ServiceWorker_startWorker(self, scopeURL):
    """
    Function path: ServiceWorker.startWorker
            Domain: ServiceWorker
            Method name: startWorker

            Parameters:
                    Required arguments:
                            'scopeURL' (type: string) -> No description
            No return value.
    """
    assert isinstance(scopeURL, (str,)), (
        "Argument 'scopeURL' must be of type '['str']'. Received type: '%s'"
        % type(scopeURL))
    return self.synchronous_command('ServiceWorker.startWorker',
                                    scopeURL=scopeURL)
def leagues(self, year=2019):
    """Return all leagues in dict {id0: league0, id1: league1}.

    :params year: Year.
    """
    # Lazily populate and memoize the per-year league mapping.
    try:
        return self._leagues[year]
    except KeyError:
        self._leagues[year] = leagues(year)
        return self._leagues[year]
def minimal_selector(self, complete_selector):
    """Returns the minimal selector that uniquely matches `complete_selector`.

    Args:
      complete_selector: A complete selector stored in the map.

    Returns:
      A partial selector that unambiguously matches `complete_selector`.

    Raises:
      KeyError: If `complete_selector` is not in the map.
    """
    if complete_selector not in self._selector_map:
        raise KeyError("No value with selector '{}'.".format(complete_selector))

    components = complete_selector.split('.')
    node = self._selector_tree
    start = None
    # Walk the tree from the last component backwards, tracking the highest
    # point at which the path becomes unambiguous.
    for idx, part in enumerate(reversed(components)):
        if len(node) != 1:
            start = None
        elif start is None:
            start = -idx  # Negative index, since we're iterating in reverse.
        node = node[part]

    if len(node) > 1:  # The selector is a substring of another selector.
        return complete_selector
    return '.'.join(components[start:])
def get_annotation_urls_and_checksums(species, release=None, ftp=None):
    """Get FTP URLs and checksums for Ensembl genome annotations.

    Parameters
    ----------
    species : str or list of str
        The species or list of species for which to get genome annotations
        (e.g., "Homo_sapiens").
    release : int, optional
        The release number to look up. If `None`, use latest release. [None]
    ftp : ftplib.FTP, optional
        The FTP connection to use. If `None`, the function will open and close
        its own connection using user "anonymous".

    Returns
    -------
    collections.OrderedDict
        Maps each species name to a ``(gtf_url, gtf_checksum)`` tuple.
    """
    ### type checks
    assert isinstance(species, (str, _oldstr)) or isinstance(species, Iterable)
    if release is not None:
        assert isinstance(release, int)
    if ftp is not None:
        assert isinstance(ftp, ftplib.FTP)

    ### open FTP connection if necessary
    close_connection = False
    ftp_server = 'ftp.ensembl.org'
    ftp_user = 'anonymous'
    if ftp is None:
        ftp = ftplib.FTP(ftp_server)
        ftp.login(ftp_user)
        close_connection = True

    ### determine release if necessary
    if release is None:
        # use latest release
        release = util.get_latest_release(ftp=ftp)

    species_data = OrderedDict()
    if isinstance(species, (str, _oldstr)):
        species_list = [species]
    else:
        species_list = species

    for spec in species_list:
        # get the GTF file URL
        # => since the naming scheme isn't consistent across species,
        #    we're using a flexible scheme here to find the right file
        species_dir = '/pub/release-%d/gtf/%s' % (release, spec.lower())
        data = []
        ftp.dir(species_dir, data.append)
        gtf_file = []
        for d in data:
            # the file name is the last whitespace-separated field of the
            # FTP directory-listing line
            i = d.rindex(' ')
            fn = d[(i + 1):]
            if fn.endswith('.%d.gtf.gz' % release):
                gtf_file.append(fn)
        # exactly one GTF file per species/release is expected
        assert len(gtf_file) == 1
        gtf_file = gtf_file[0]
        _LOGGER.debug('GTF file: %s', gtf_file)

        ### get the checksum for the GTF file
        checksum_url = '/'.join([species_dir, 'CHECKSUMS'])
        file_checksums = util.get_file_checksums(checksum_url, ftp=ftp)
        gtf_checksum = file_checksums[gtf_file]
        _LOGGER.debug('GTF file checksum: %d', gtf_checksum)

        gtf_url = 'ftp://%s%s/%s' %(ftp_server, species_dir, gtf_file)
        species_data[spec] = (gtf_url, gtf_checksum)

    # close FTP connection, if we opened it
    if close_connection:
        ftp.close()

    return species_data
def list_categories(self, package_keyname, **kwargs):
    """List the categories for the given package.

    :param str package_keyname: The package for which to get the categories.
    :returns: List of categories associated with the package
    """
    get_kwargs = {'mask': kwargs.get('mask', CATEGORY_MASK)}
    if 'filter' in kwargs:
        get_kwargs['filter'] = kwargs['filter']

    # Only the package id is needed to query its configuration.
    package = self.get_package_by_key(package_keyname, mask='id')
    return self.package_svc.getConfiguration(id=package['id'], **get_kwargs)
def log_estimator_evaluation_result(self, eval_results):
    """Log the evaluation result of an estimator.

    ``eval_results`` is the dict returned by ``estimator.evaluate()``: it
    holds the metrics defined in the model_fn plus a global_step entry
    recording when the evaluation was performed.

    Args:
      eval_results: dict, the result of evaluate() from a estimator.
    """
    if not isinstance(eval_results, dict):
        tf.logging.warning("eval_results should be directory for logging. Got %s",
                           type(eval_results))
        return

    global_step = eval_results[tf.GraphKeys.GLOBAL_STEP]
    # Log metrics in a stable (sorted) order, skipping the step counter.
    for key in sorted(eval_results):
        if key == tf.GraphKeys.GLOBAL_STEP:
            continue
        self.log_metric(key, eval_results[key], global_step=global_step)
def read(self, *args, **kwargs):
    """Read the node's contents as if it were a file.

    Any arguments are forwarded to the underlying file object's ``read``.
    """
    with self.open('r') as handle:
        return handle.read(*args, **kwargs)
def load_df_state(path_csv: Path) -> pd.DataFrame:
    '''Load a `df_state` DataFrame from `path_csv`.

    Parameters
    ----------
    path_csv : Path
        path to the csv file that stores `df_state` produced by a supy run

    Returns
    -------
    pd.DataFrame
        `df_state` produced by a supy run
    '''
    # Two header rows, a two-level index, and dates parsed on load.
    return pd.read_csv(
        path_csv,
        header=[0, 1],
        index_col=[0, 1],
        parse_dates=True,
        infer_datetime_format=True,
    )
def impact_path(self, value):
    """Setter for the impact path; toggles the report/log actions accordingly.

    :param value: The impact path.
    :type value: str
    """
    self._impact_path = value
    has_impact = value is not None
    self.action_show_report.setEnabled(has_impact)
    self.action_show_log.setEnabled(has_impact)
    if not has_impact:
        self.report_path = None
        self.log_path = None
    else:
        self.log_path = '%s.log.html' % self.impact_path
        self.report_path = '%s.report.html' % self.impact_path
        self.save_report_to_html()
        self.save_log_to_html()
        self.show_report()
def parse_words(self):
    """Parse TextGrid word intervals.

    Extracts each word with its start/end time from the word intervals and
    wraps it in a Word instance, then attaches every phone whose time span
    falls inside the word's span.
    """
    phones = self.parse_phones()
    words = []
    for interval in self.word_intervals:
        xmin_pos = interval.index('xmin = ') + 7
        xmax_pos = interval.index('xmax = ') + 7
        # Times occupy at most 5 characters after the field label.
        start = float(interval[xmin_pos:xmin_pos + 5].strip('\t').strip('\n'))
        end = float(interval[xmax_pos:xmax_pos + 5].strip('\t').strip('\n'))
        word_text = interval[interval.index('"') + 1:interval.index("$")]
        words.append(Word(word_text, start, end))

    # Attach the phones whose start/end times are subsumed by each word.
    for word in words:
        word.phones.extend(
            phone for phone in phones
            if phone.start >= word.start and phone.end <= word.end)
    return words
def get_power_state(self, userid):
    """Get power status of a z/VM instance."""
    LOG.debug('Querying power stat of %s' % userid)
    request_data = "PowerVM " + userid + " status"
    action = "query power state of '%s'" % userid
    with zvmutils.log_and_reraise_smt_request_failed(action):
        results = self._request(request_data)
    with zvmutils.expect_invalid_resp_data(results):
        # Response line looks like "...: <status>"; keep what follows ': '.
        return results['response'][0].partition(': ')[2]
def output_file(self):
    """
    If only one output file return it. Otherwise raise an exception.
    """
    out_files = self.output_files
    if len(out_files) == 1:
        return out_files[0]
    raise ValueError(
        "output_file property is only valid if there is a single"
        " output file. Here there are "
        "%d output files." % (len(out_files)))
def _get_substitute_element(head, elt, ps):
    '''Return the GED typecode if elt matches a member of the head
    substitutionGroup, else None.

    head -- ElementDeclaration typecode,
    elt -- the DOM element being parsed
    ps -- ParsedSoap Instance
    '''
    if isinstance(head, ElementDeclaration):
        return ElementDeclaration.getSubstitutionElement(head, elt, ps)
    return None
def setTimer(self, timeout, description=None):
    """
    Start a one-shot timer.

    :param timeout: timeout in seconds
    :param description: optional text passed to the timeout handler
    :return: the timerId
    """
    self.timerId += 1
    new_id = self.timerId
    timer = Timer(timeout, self.__timeoutHandler, (new_id, description))
    timer.start()
    self.timers[new_id] = timer
    return new_id
def get_activity_query_session(self):
    """Gets the ``OsidSession`` associated with the activity query service.

    return: (osid.learning.ActivityQuerySession) - a
            ``ActivityQuerySession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_activity_query()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_activity_query()`` is ``true``.*
    """
    if self.supports_activity_query():
        # pylint: disable=no-member
        return sessions.ActivityQuerySession(runtime=self._runtime)
    raise errors.Unimplemented()
def _combine(self, x, y):
"""Combines two constraints, raising an error if they are not compatible."""
if x is None or y is None:
return x or y
if x != y:
raise ValueError('Incompatible set of constraints provided.')
return x | Combines two constraints, raising an error if they are not compatible. |
def validate_password_confirmation(self, value):
    """Check that the confirmation matches the originally submitted password."""
    if value == self.initial_data['password']:
        return value
    raise serializers.ValidationError(_('Password confirmation mismatch'))
def greenfct(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV, lambd):
    r"""Calculate Green's function for TM and TE.

    This is a modified version of empymod.kernel.greenfct(). See the original
    version for more information.

    Returns the two lists ``(PTM, PTE)``, each holding the up/down field
    propagator products plus the direct-field term.
    """
    # GTM/GTE have shape (frequency, offset, lambda).
    # gamTM/gamTE have shape (frequency, offset, layer, lambda):
    for TM in [True, False]:

        # Define eta/zeta depending if TM or TE
        if TM:
            e_zH, e_zV, z_eH = etaH, etaV, zetaH   # TM: zetaV not used
        else:
            e_zH, e_zV, z_eH = zetaH, zetaV, etaH  # TE: etaV not used

        # Uppercase gamma
        Gam = np.sqrt((e_zH/e_zV)[:, None, :, None] *
                      (lambd*lambd)[None, :, None, :] +
                      (z_eH*e_zH)[:, None, :, None])

        # Gamma in receiver layer
        lrecGam = Gam[:, :, lrec, :]

        # Reflection (coming from below (Rp) and above (Rm) rec)
        Rp, Rm = reflections(depth, e_zH, Gam, lrec, lsrc, False)

        # Field propagators
        # (Up- (Wu) and downgoing (Wd), in rec layer); Eq 74
        if lrec != depth.size-1:  # No upgoing field prop. if rec in last
            ddepth = depth[lrec + 1] - zrec
            Wu = np.exp(-lrecGam*ddepth)
        else:
            Wu = np.full_like(lrecGam, 0+0j)
        if lrec != 0:     # No downgoing field propagator if rec in first
            ddepth = zrec - depth[lrec]
            Wd = np.exp(-lrecGam*ddepth)
        else:
            Wd = np.full_like(lrecGam, 0+0j)

        # Field at rec level (coming from below (Pu) and above (Pd) rec)
        Puu, Pud, Pdu, Pdd = fields(depth, Rp, Rm, Gam, lrec, lsrc, zsrc, TM)

        # Store in corresponding variable PT* = [T*uu, T*ud, T*du, T*dd]
        df = np.exp(-lrecGam*abs(zsrc - zrec))  # direct field
        # NOTE(review): TM scales with Gamma in the receiver layer, TE with
        # zeta over Gamma in the source layer -- matches empymod's kernel.
        fTM = Gam[:, :, lrec, :]/etaH[:, None, lrec, None]
        fTE = zetaH[:, None, lsrc, None]/Gam[:, :, lsrc, :]
        if TM:
            PTM = [Puu*Wu*fTM, Pud*Wu*fTM, Pdu*Wd*fTM, Pdd*Wd*fTM, -df*fTM]
        else:
            PTE = [Puu*Wu*fTE, Pud*Wu*fTE, Pdu*Wd*fTE, Pdd*Wd*fTE, df*fTE]

    # Return Green's functions (both iterations of the loop have run,
    # so PTM and PTE are both defined here)
    return PTM, PTE
def graphcut_subprocesses(graphcut_function, graphcut_arguments, processes = None):
    """
    Executes multiple graph cuts in parallel.
    This can result in a significant speed-up.

    Parameters
    ----------
    graphcut_function : function
        The graph cut to use (e.g. `graphcut_stawiaski`).
    graphcut_arguments : tuple
        List of arguments to pass to the respective subprocesses resp. the ``graphcut_function``.
    processes : integer or None
        The number of processes to run simultaneously, if not supplied, will be the same
        as the number of processors.

    Returns
    -------
    segmentations : tuple of ndarray
        The graph-cut segmentation results as list of boolean arrays.
    """
    # initialize logger
    logger = Logger.getInstance()

    # check and eventually enhance input parameters
    if not processes:
        processes = multiprocessing.cpu_count()
    # use isinstance instead of an exact type comparison, and reject
    # non-positive counts
    if not isinstance(processes, int) or processes <= 0:
        raise ArgumentError('The number processes can not be zero or negative.')

    # log the number of processes actually used (previously this logged
    # cpu_count() even when the caller supplied a different value)
    logger.debug('Executing graph cuts in {} subprocesses.'.format(processes))

    # create subprocess pool, execute, and always clean up the workers
    # (the pool was previously leaked)
    pool = multiprocessing.Pool(processes)
    try:
        results = pool.map(graphcut_function, graphcut_arguments)
    finally:
        pool.close()
        pool.join()
    return results
def update_scale(self, overflow):
    """Dynamically adjust the loss scale based on overflow history."""
    iters_since_rescale = self._num_steps - self._last_rescale_iter
    if overflow:
        self._last_overflow_iter = self._num_steps
        self._overflows_since_rescale += 1
        overflow_ratio = self._overflows_since_rescale / float(iters_since_rescale)
        # Tolerate a certain amount of NaNs before actually scaling down.
        if overflow_ratio >= self.tolerance:
            self.loss_scale /= self.scale_factor
            self._last_rescale_iter = self._num_steps
            self._overflows_since_rescale = 0
            logging.info('DynamicLossScaler: overflow detected. set loss_scale = %s',
                         self.loss_scale)
    elif (self._num_steps - self._last_overflow_iter) % self.scale_window == 0:
        # A long overflow-free streak earns a scale increase.
        self.loss_scale *= self.scale_factor
        self._last_rescale_iter = self._num_steps
    self._num_steps += 1
def category(self, categories):
    """Add one category, or a list of categories, to this message.

    :rtype: list(Category)
    """
    # Normalize to a list so a single category and a list share one path.
    if not isinstance(categories, list):
        categories = [categories]
    for cat in categories:
        self.add_category(cat)
def _find_package(self, root_package):
    """Find the dotted package name of this file.

    :param root_package: root package
    :return: package name
    """
    relative = self.path.replace(root_package, "")
    if relative.endswith(".py"):
        relative = relative[:-3]
    relative = relative.replace(os.path.sep, MODULE_SEP)
    # Prepend the root package's folder name to get the full dotted path.
    return get_folder_name(root_package) + relative
def compile_rcc(self, namespace, unknown):
    """Compile qt resource files.

    :param namespace: namespace containing arguments from the launch parser
    :type namespace: Namespace
    :param unknown: list of unknown arguments
    :type unknown: list
    :returns: None
    :rtype: None
    :raises: None
    """
    qtcompile.compile_rcc(namespace.rccfile.name)
def set_level(self, level):
    """
    A method to set all column values to one of the levels.

    :param str level: The level at which the column will be set (a string)

    :returns: H2OFrame with entries set to the desired level.
    """
    set_level_expr = ExprNode("setLevel", self, level)
    return H2OFrame._expr(expr=set_level_expr, cache=self._ex._cache)
def flatten(x):
    """flatten(sequence) -> generator

    Yields every element retrieved from the sequence and all recursively
    contained sub-sequences (iterables); strings and bytes are atoms.

    Examples:
    >>> [1, 2, [3,4], (5,6)]
    [1, 2, [3, 4], (5, 6)]
    >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
    """
    for element in x:
        # An atom is anything non-iterable, or a text/bytes value.
        is_atom = (isinstance(element, (binary, unicode)) or
                   not hasattr(element, "__iter__"))
        if is_atom:
            yield element
        else:
            for nested in flatten(element):
                yield nested
def _place_ticks_vertical(self):
"""Display the ticks for a vertical slider."""
for tick, label in zip(self.ticks, self.ticklabels):
y = self.convert_to_pixels(tick)
label.place_configure(y=y) | Display the ticks for a vertical slider. |
def hdrval(cls):
    """Construct dictionary mapping display column title to
    IterationStats entries.
    """
    mapping = {'Itn': 'Iter'}
    # Objective-function columns come from the subclass.
    mapping.update(cls.hdrval_objfun)
    mapping.update({'r': 'PrimalRsdl', 's': 'DualRsdl', u('ρ'): 'Rho'})
    return mapping
def engine_from_environment() -> Engine:
    """Returns an Engine instance configured using environment variables.

    If the environment variables are set, but incorrect, an authentication
    failure will occur when attempting to run jobs on the engine.

    Required Environment Variables:
        QUANTUM_ENGINE_PROJECT: The name of a google cloud project, with the
            quantum engine enabled, that you have access to.
        QUANTUM_ENGINE_API_KEY: An API key for the google cloud project named
            by QUANTUM_ENGINE_PROJECT.

    Raises:
        EnvironmentError: The environment variables are not set.
    """
    api_key = os.environ.get(ENV_API_KEY)
    if not api_key:
        raise EnvironmentError(
            'Environment variable {} is not set.'.format(ENV_API_KEY))
    return Engine(api_key=api_key,
                  default_project_id=os.environ.get(ENV_DEFAULT_PROJECT_ID))
def reduce_hierarchy(x, depth):
    """Reduce the hierarchy (levels separated by ``|``) to the given depth.

    A negative ``depth`` counts back from the deepest level.
    """
    levels = x.split('|')
    if depth < 0:
        depth = len(levels) + depth - 1
    return '|'.join(levels[:depth + 1])
def unpack_rsp(cls, rsp_pb):
    """Convert from PLS response to user response"""
    if rsp_pb.retType != RET_OK:
        return RET_ERROR, rsp_pb.retMsg, None
    orders = [OrderListQuery.parse_order(rsp_pb, raw)
              for raw in rsp_pb.s2c.orderList]
    return RET_OK, "", orders
def append_item(self, item):
    """
    Add an item to the end of the menu before the exit item.

    Args:
        item (MenuItem): The item to be added.
    """
    # Temporarily drop the exit entry so the new item lands before it.
    had_exit = self.remove_exit()
    item.menu = self
    self.items.append(item)
    if had_exit:
        self.add_exit()
def _updateMinDutyCycles(self):
"""
Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted.
"""
if self._globalInhibition or self._inhibitionRadius > self._numInputs:
self._updateMinDutyCyclesGlobal()
else:
self._updateMinDutyCyclesLocal() | Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted. |
def getAssociation(self, assoc_handle, dumb, checkExpiration=True):
    """Get the association with the specified handle.

    @type assoc_handle: str

    @param dumb: Is this association used with dumb mode?
    @type dumb: bool

    @returns: the association, or None if no valid association with that
        handle was found.
    @returntype: L{openid.association.Association}
    """
    # Hmm. We've created an interface that deals almost entirely with
    # assoc_handles. The only place outside the Signatory that uses this
    # (and thus the only place that ever sees Association objects) is
    # when creating a response to an association request, as it must have
    # the association's secret.
    if assoc_handle is None:
        raise ValueError("assoc_handle must not be None")

    # dumb and smart associations live under different store keys
    key = self._dumb_key if dumb else self._normal_key
    assoc = self.store.getAssociation(key, assoc_handle)
    if assoc is None or assoc.expiresIn > 0:
        return assoc

    # Found, but already expired: optionally purge it, never return it.
    logging.info("requested %sdumb key %r is expired (by %s seconds)" %
                 ((not dumb) and 'not-' or '',
                  assoc_handle, assoc.expiresIn))
    if checkExpiration:
        self.store.removeAssociation(key, assoc_handle)
    return None
def get_urls(self):
    """Return this router's urls plus those of every nested router."""
    urls = super(SimpleRouter, self).get_urls()
    for nested in self.nested_routers:
        urls += nested.get_urls()
    return urls
def generate_obj(self):
    """Build the secret object, respecting existing information
    and user specified options."""
    # Start from a copy of the existing secret so untouched keys survive.
    secret_obj = deepcopy(self.existing) if self.existing else {}
    for key in self.keys:
        key_name = key['name']
        keep_existing = (self.existing
                         and key_name in self.existing
                         and not key.get('overwrite'))
        if keep_existing:
            LOG.debug("Not overwriting %s/%s", self.path, key_name)
        else:
            secret_obj[key_name] = generated_key(key)
    return secret_obj
def decrypt(receiver_prvhex: str, msg: bytes) -> bytes:
    """
    Decrypt with eth private key

    Parameters
    ----------
    receiver_prvhex: str
        Receiver's ethereum private key hex string
    msg: bytes
        Data to decrypt

    Returns
    -------
    bytes
        Plain text
    """
    # The sender's uncompressed public key is prepended to the ciphertext.
    pubkey = msg[0:65]  # pubkey's length is 65 bytes
    encrypted = msg[65:]
    sender_public_key = hex2pub(pubkey.hex())
    private_key = hex2prv(receiver_prvhex)
    # ECDH: shared AES key derived from our private key + sender's public key.
    aes_key = derive(private_key, sender_public_key)
    return aes_decrypt(aes_key, encrypted)
def get_constant_state(self):
    """Read state that was written in "first_part" mode.

    Returns:
        a structure
    """
    # Consume states in order; each call advances the cursor by one.
    index = self.next_constant_state
    self.next_constant_state = index + 1
    return self.constant_states[index]
def _extract_number_of_taxa(self):
"""
sets `self.number_taxa` to the number of taxa as string
"""
n_taxa = dict()
for i in self.seq_records:
if i.gene_code not in n_taxa:
n_taxa[i.gene_code] = 0
n_taxa[i.gene_code] += 1
number_taxa = sorted([i for i in n_taxa.values()], reverse=True)[0]
self.number_taxa = str(number_taxa) | sets `self.number_taxa` to the number of taxa as string |
def cur_space(self, name=None):
    """Set the current space to Space ``name`` and return it.

    If called without arguments, the current space is returned.
    Otherwise, the current space is set to the space named ``name``
    and the space is returned.
    """
    if name is not None:
        self._impl.model.currentspace = self._impl.spaces[name]
        return self.cur_space()
    return self._impl.model.currentspace.interface
def app_restart(name, profile, **kwargs):
    """Restart application.

    Executes ```cocaine-tool app pause``` and ```cocaine-tool app start```
    sequentially. It can be used to quickly change application profile.
    """
    ctx = Context(**kwargs)
    action_kwargs = {
        'node': ctx.repo.create_secure_service('node'),
        'locator': ctx.locator,
        'name': name,
        'profile': profile,
    }
    ctx.execute_action('app:restart', **action_kwargs)
def put(self):
    """Re-import all templates, overwriting any local changes made."""
    try:
        _import_templates(force=True)
        return self.make_response('Imported templates')
    except Exception:
        # Bug fix: a bare ``except:`` also trapped SystemExit and
        # KeyboardInterrupt; Exception keeps best-effort error reporting
        # without swallowing interpreter-level interrupts.
        self.log.exception('Failed importing templates')
        return self.make_response('Failed importing templates', HTTP.SERVER_ERROR)
def axis_angle(self):
    """:obj:`numpy.ndarray` of float: The axis-angle representation for the rotation.
    """
    qw, qx, qy, qz = self.quaternion
    theta = 2 * np.arccos(qw)
    axis = np.array([1, 0, 0])
    if theta > 0:
        # Recover the unit rotation axis from the quaternion's vector part.
        denom = np.sqrt(1.0 - qw**2)
        axis = np.array([qx, qy, qz]) / denom
    return theta * axis
def extract_interesting_date_ranges(returns):
    """
    Extracts returns based on interesting events. See
    gen_date_range_interesting.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.

    Returns
    -------
    ranges : OrderedDict
        Date ranges, with returns, of all valid events.
    """
    returns_dupe = returns.copy()
    # Normalize the index so label slicing with Timestamps works below.
    returns_dupe.index = returns_dupe.index.map(pd.Timestamp)
    ranges = OrderedDict()
    for name, (start, end) in PERIODS.items():
        try:
            period = returns_dupe.loc[start:end]
            if len(period) == 0:
                continue
            ranges[name] = period
        except Exception:
            # Bug fix: was ``except BaseException``, which also swallowed
            # SystemExit/KeyboardInterrupt. Exception still skips periods
            # that fall outside the data.
            continue
    return ranges
def get(self, endpoint='', url='', params=None, use_api_key=False):
    """Perform a get for a json API endpoint.

    :param string endpoint: Target endpoint. (Optional).
    :param string url: Override the endpoint and provide the full url (eg for pagination). (Optional).
    :param dict params: Provide parameters to pass to the request. (Optional).
    :return: Response json.
    :rtype: ``dict``
    """
    # Thin delegation: all the work happens in _request.
    response = self._request('get', endpoint, url,
                             params=params, use_api_key=use_api_key)
    return response
def read_zipfile(self, encoding='utf8'):
    """Read the first member of the zip archive at ``self.abspath``.

    :param encoding: text encoding used to decode the member's bytes
    :return: decoded contents of the first member as a string, or
        None when the archive contains no members (original behavior)
    """
    from zipfile import ZipFile
    with ZipFile(self.abspath) as zipped:
        names = zipped.namelist()
        if not names:
            return None
        # ZipFile.read() opens and closes the member internally, fixing the
        # file handle leaked by the original ``zipped.open(...).read()``.
        return zipped.read(names[0]).decode(encoding)
def report_many(self, event_list, metadata=None, block=None):
    """
    Reports all the given events to Alooma by formatting them properly and
    placing them in the buffer to be sent by the Sender instance

    :param event_list: A list of dicts / strings representing events
    :param metadata: (Optional) A dict with extra metadata to be attached to
                     the event
    :param block: (Optional) If True, the function will block the thread
                  until the event buffer has space for the event.
                  If False, reported events are discarded if the queue is
                  full. Defaults to None, which uses the global `block`
                  parameter given in the `init`.
    :return: A list with tuples, each containing a failed event
             and its original index. An empty list means success
    """
    failed = []
    for idx, event in enumerate(event_list):
        # report() returns falsy when the event could not be queued
        if not self.report(event, metadata, block):
            failed.append((idx, event))
    return failed
def _f_gene(sid, prefix="G_"):
    """Clips gene prefix from id."""
    # Restore real dots before stripping the SBML gene prefix.
    return _clip(sid.replace(SBML_DOT, "."), prefix)
def _recurse(data, obj):
    """Iterates over all children of the current object, gathers the contents
    contributing to the resulting PGFPlots file, and returns those.

    Args:
        data: mutable option/state dict threaded through the whole conversion
            (e.g. "current axes", "add axis environment", "show_info").
        obj: the matplotlib artist whose children are visited.

    Returns:
        Tuple ``(data, content)`` where ``content`` is the flattened,
        z-order-sorted list of PGFPlots code fragments for ``obj``'s children.
    """
    # Fragments are collected with their z-order so flatten() can emit them
    # in drawing order.
    content = _ContentManager()
    for child in obj.get_children():
        # Some patches are Spines, too; skip those entirely.
        # See <https://github.com/nschloe/matplotlib2tikz/issues/277>.
        if isinstance(child, mpl.spines.Spine):
            continue
        if isinstance(child, mpl.axes.Axes):
            ax = axes.Axes(data, child)
            # Colorbars are rendered via their parent plot, not as axes.
            if ax.is_colorbar:
                continue
            # add extra axis options
            if data["extra axis options [base]"]:
                ax.axis_options.extend(data["extra axis options [base]"])
            data["current mpl axes obj"] = child
            data["current axes"] = ax
            # Run through the child objects, gather the content.
            data, children_content = _recurse(data, child)
            # populate content and add axis environment if desired
            if data["add axis environment"]:
                content.extend(
                    ax.get_begin_code() + children_content + [ax.get_end_code(data)], 0
                )
            else:
                content.extend(children_content, 0)
            # print axis environment options, if told to show infos
            if data["show_info"]:
                print("=========================================================")
                print("These would have been the properties of the environment:")
                print("".join(ax.get_begin_code()[1:]))
                print("=========================================================")
        # Type-dispatch to the per-artist drawing helpers. NOTE(review):
        # branch order may matter if any of these mpl classes are related by
        # inheritance — confirm before reordering.
        elif isinstance(child, mpl.lines.Line2D):
            data, cont = line2d.draw_line2d(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(child, mpl.image.AxesImage):
            data, cont = img.draw_image(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(child, mpl.patches.Patch):
            data, cont = patch.draw_patch(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(
            child, (mpl.collections.PatchCollection, mpl.collections.PolyCollection)
        ):
            data, cont = patch.draw_patchcollection(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(child, mpl.collections.PathCollection):
            data, cont = path.draw_pathcollection(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(child, mpl.collections.LineCollection):
            data, cont = line2d.draw_linecollection(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(child, mpl.collections.QuadMesh):
            data, cont = qmsh.draw_quadmesh(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(child, mpl.legend.Legend):
            data = legend.draw_legend(data, child)
            if data["legend colors"]:
                content.extend(data["legend colors"], 0)
        elif isinstance(child, (mpl.text.Text, mpl.text.Annotation)):
            data, cont = text.draw_text(data, child)
            content.extend(cont, child.get_zorder())
        elif isinstance(child, (mpl.axis.XAxis, mpl.axis.YAxis)):
            # Deliberately ignored: tick/label output is produced elsewhere.
            pass
        else:
            warnings.warn(
                "matplotlib2tikz: Don't know how to handle object {}.".format(
                    type(child)
                )
            )
    return data, content.flatten()
def patch_api_service(self, name, body, **kwargs):
    """patch_api_service

    partially update the specified APIService
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_api_service(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the APIService (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1beta1APIService
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always get the payload only.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # async path: return the request thread directly
        return self.patch_api_service_with_http_info(name, body, **kwargs)
    return self.patch_api_service_with_http_info(name, body, **kwargs)
def mixin_function_or_method(target, routine, name=None, isbound=False):
    """Mixin a routine into the target.

    :param routine: routine to mix in target.
    :param str name: mixin name. Routine name by default.
    :param bool isbound: If True (False by default), the mixin result is a
        bound method to target.
    :raises Mixin.MixInError: if ``routine`` is neither a function nor a
        method.
    """
    if isfunction(routine):
        function = routine
    elif ismethod(routine):
        function = get_method_function(routine)
    else:
        raise Mixin.MixInError(
            "{0} must be a function or a method.".format(routine))
    if name is None:
        name = routine.__name__
    if not isclass(target) or isbound:
        _type = type(target)
        method_args = [function, target]
        if PY2:
            # Bug fix: the original did ``method_args += _type``, which tries
            # to extend the list with a (non-iterable) class object and raises
            # TypeError. Python 2's MethodType takes (func, instance, class),
            # so the class must be appended as a single element.
            method_args.append(_type)
        result = MethodType(*method_args)
    else:
        if PY2:
            # Python 2 unbound method, bound to the class only.
            result = MethodType(function, None, target)
        else:
            # Python 3: plain functions act as methods when set on a class.
            result = function
    Mixin.set_mixin(target, result, name)
    return result
def set_tag(self, ip_dest, next_hop, **kwargs):
    """Set the tag value for the specified route

    Args:
        ip_dest (string): The ip address of the destination in the
            form of A.B.C.D/E
        next_hop (string): The next hop interface or ip address
        **kwargs['next_hop_ip'] (string): The next hop address on
            destination interface
        **kwargs['distance'] (string): Administrative distance for this
            route
        **kwargs['tag'] (string): Route tag
        **kwargs['route_name'] (string): Route name

    Returns:
        True if the operation succeeds, otherwise False.

    Notes:
        Any existing route_name value must be included in call to
        set_tag, otherwise the tag will be reset
        by the call to EOS.
    """
    # Delegate to _set_route, which rewrites the whole route entry.
    result = self._set_route(ip_dest, next_hop, **kwargs)
    return result
def get_submissions(self, url):
    """
    Connects to Reddit and gets a JSON representation of submissions.
    This JSON data is then processed and returned.

    url: A url that requests for submissions should be sent to.
    """
    response = self.client.get(url, params={'limit': self.options['limit']})
    # Each listing child wraps the submission payload under 'data'.
    children = response.json()['data']['children']
    return [child['data'] for child in children]
def distortImage(self, image):
    '''
    opposite of 'correct'
    '''
    img = imread(image)
    height, width = img.shape[:2]
    # build the per-pixel distortion maps and remap the image through them
    mapx, mapy = self.getDistortRectifyMap(width, height)
    return cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR,
                     borderValue=(0, 0, 0))
def peek_step(self, val: ArrayValue,
              sn: "DataNode") -> Tuple[Optional[Value], "DataNode"]:
    """Return entry value addressed by the receiver + its schema node.

    Args:
        val: Current value (array).
        sn: Current schema node.
    """
    try:
        entry = val[self.index]
    except (IndexError, KeyError, TypeError):
        # out-of-range index or non-indexable value: no entry here
        return None, sn
    return entry, sn
def untag(name, tag_name):
    """
    Remove the given tag from the given metric.
    Return True if the metric was tagged, False otherwise
    """
    with LOCK:
        members = TAGS.get(tag_name)
        if not members:
            return False
        try:
            members.remove(name)
        except KeyError:
            # metric was never tagged with tag_name
            return False
        # drop the tag entirely once its last member is gone
        if not members:
            TAGS.pop(tag_name)
        return True
def _compute_subplot_domains(widths, spacing):
"""
Compute normalized domain tuples for a list of widths and a subplot
spacing value
Parameters
----------
widths: list of float
List of the desired withs of each subplot. The length of this list
is also the specification of the number of desired subplots
spacing: float
Spacing between subplots in normalized coordinates
Returns
-------
list of tuple of float
"""
# normalize widths
widths_sum = float(sum(widths))
total_spacing = (len(widths) - 1) * spacing
widths = [(w / widths_sum)*(1-total_spacing) for w in widths]
domains = []
for c in range(len(widths)):
domain_start = c * spacing + sum(widths[:c])
domain_stop = min(1, domain_start + widths[c])
domains.append((domain_start, domain_stop))
return domains | Compute normalized domain tuples for a list of widths and a subplot
spacing value
Parameters
----------
widths: list of float
List of the desired withs of each subplot. The length of this list
is also the specification of the number of desired subplots
spacing: float
Spacing between subplots in normalized coordinates
Returns
-------
list of tuple of float |
def indication(self, pdu):
    """Send a message."""
    if _debug: Node._debug("indication(%s) %r", self.name, pdu)

    # a node must be bound to a LAN before it can transmit
    if not self.lan:
        raise ConfigurationError("unbound node")

    # An unset source gets our address; a mismatching source is only
    # tolerated when spoofing is enabled (to allow simulated spoofing).
    if pdu.pduSource is None:
        pdu.pduSource = self.address
    elif (pdu.pduSource != self.address) and (not self.spoofing):
        raise RuntimeError("spoofing address conflict")

    # actual network delivery is a zero-delay task
    OneShotFunction(self.lan.process_pdu, pdu)
def add(self, child):
    """
    Adds a typed child object to the component type.

    @param child: Child object to be added.
    """
    # FatComponent children get the specialized registration path.
    if not isinstance(child, FatComponent):
        Fat.add(self, child)
    else:
        self.add_child_component(child)
def gene_filter(self, query, mongo_query):
    """ Adds gene-related filters to the query object

    Args:
        query(dict): a dictionary of query filters specified by the users
        mongo_query(dict): the query that is going to be submitted to the database

    Returns:
        mongo_query(dict): returned object contains gene and panel-related filters
    """
    LOG.debug('Adding panel and genes-related parameters to the query')
    gene_query = []

    if query.get('hgnc_symbols') and query.get('gene_panels'):
        # Both genes and panels given: a variant may match either.
        gene_query.append({'hgnc_symbols': {'$in': query['hgnc_symbols']}})
        gene_query.append({'panels': {'$in': query['gene_panels']}})
        mongo_query['$or'] = gene_query
    else:
        if query.get('hgnc_symbols'):
            hgnc_symbols = query['hgnc_symbols']
            mongo_query['hgnc_symbols'] = {'$in': hgnc_symbols}
            LOG.debug("Adding hgnc_symbols: %s to query" %
                      ', '.join(hgnc_symbols))
        if query.get('gene_panels'):
            gene_panels = query['gene_panels']
            mongo_query['panels'] = {'$in': gene_panels}

    # Bug fix: the original returned ``gene_query`` (an empty list whenever
    # only symbols OR only panels were given), while the documented contract
    # is to return the updated mongo_query.
    return mongo_query
def start(self):
    """Start listening from stream"""
    if self.stream is None:
        # Lazily open a 16 kHz mono 16-bit input stream on first start.
        from pyaudio import PyAudio, paInt16
        self.pa = PyAudio()
        self.stream = self.pa.open(
            16000, 1, paInt16, True, frames_per_buffer=self.chunk_size
        )
        self._wrap_stream_read(self.stream)
    self.engine.start()
    self.running = True
    self.is_paused = False
    # Predictions are consumed on a daemon thread so shutdown isn't blocked.
    worker = Thread(target=self._handle_predictions)
    worker.daemon = True
    self.thread = worker
    worker.start()
def about_axis(cls, center, angle, axis, invert=False):
    """Create transformation that represents a rotation about an axis

    Arguments:
     | ``center``  --  Point on the axis
     | ``angle``  --  Rotation angle
     | ``axis``  --  Rotation axis
     | ``invert``  --  When True, an inversion rotation is constructed
                       [default=False]
    """
    # Translate the axis point to the origin, rotate, translate back.
    to_origin = Translation(-center)
    rotate = Rotation.from_properties(angle, axis, invert)
    back = Translation(center)
    return back * rotate * to_origin
def par_y1step(i):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}_{1,G_i}`, one of the disjoint problems of
    optimizing :math:`\mathbf{y}_1`.

    Parameters
    ----------
    i : int
        Index of grouping to update
    """
    global mp_Y1
    grpind = slice(mp_grp[i], mp_grp[i+1])
    XU1 = mp_X[grpind] + 1/mp_alpha*mp_U1[grpind]
    # Bug fix: ``is 1`` compared object identity, which only works due to
    # CPython's small-int caching; ``== 1`` is the correct value comparison.
    if mp_wl1.shape[mp_axisM] == 1:
        # scalar weighting along the filter axis: broadcast one gamma
        gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1
    else:
        gamma = mp_lmbda/(mp_alpha**2*mp_rho)*mp_wl1[grpind]
    Y1 = sp.prox_l1(XU1, gamma)
    if mp_NonNegCoef:
        Y1[Y1 < 0.0] = 0.0
    if mp_NoBndryCross:
        # zero out coefficients whose filters would cross the signal boundary
        for n in range(len(mp_Nv)):
            Y1[(slice(None),) + (slice(None),)*n +
               (slice(1-mp_Dshp[n], None),)] = 0.0
    mp_Y1[mp_grp[i]:mp_grp[i+1]] = Y1
def pot_for_component(pot, q, component=1, reverse=False):
    """
    Convert a Roche potential between component reference frames.

    q for secondaries should already be flipped (via q_for_component)
    """
    # currently only used by legacy wrapper: consider moving/removing
    if component == 1:
        return pot
    if component == 2:
        if reverse:
            return pot/q + 0.5*(q-1)/q
        return q*pot - 0.5*(q-1)
    raise NotImplementedError
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.