code | docstring |
|---|---|
def inet_ntoa(address):
"""Convert a network format IPv6 address into text.
@param address: the binary address
@type address: string
@rtype: string
@raises ValueError: the address isn't 16 bytes long
"""
if len(address) != 16:
raise ValueError("IPv6 addresses are 16 bytes long")
hex = address.encode('hex_codec')
chunks = []
i = 0
l = len(hex)
while i < l:
chunk = hex[i : i + 4]
# strip leading zeros. we do this with an re instead of
# with lstrip() because lstrip() didn't support chars until
# python 2.2.2
m = _leading_zero.match(chunk)
if m is not None:
chunk = m.group(1)
chunks.append(chunk)
i += 4
#
# Compress the longest subsequence of 0-value chunks to ::
#
best_start = 0
best_len = 0
start = -1
last_was_zero = False
for i in xrange(8):
if chunks[i] != '0':
if last_was_zero:
end = i
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
last_was_zero = False
elif not last_was_zero:
start = i
last_was_zero = True
if last_was_zero:
end = 8
current_len = end - start
if current_len > best_len:
best_start = start
best_len = current_len
if best_len > 0:
if best_start == 0 and \
(best_len == 6 or
best_len == 5 and chunks[5] == 'ffff'):
# We have an embedded IPv4 address
if best_len == 6:
prefix = '::'
else:
prefix = '::ffff:'
hex = prefix + dns.ipv4.inet_ntoa(address[12:])
else:
hex = ':'.join(chunks[:best_start]) + '::' + \
':'.join(chunks[best_start + best_len:])
else:
hex = ':'.join(chunks)
return hex | Convert a network format IPv6 address into text.
@param address: the binary address
@type address: string
@rtype: string
@raises ValueError: the address isn't 16 bytes long |
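The helper `_leading_zero` is a module-level regex the function assumes but does not define here; a plausible definition, plus the Python 3 stdlib equivalent of the whole conversion, is sketched below (the exact regex in the original module may differ):

```python
import re

# One plausible definition: strip leading zeros but keep at least one character.
_leading_zero = re.compile(r'0+(.+)')

# On Python 3, the stdlib ipaddress module performs the same packed-to-text
# conversion, including compressing the longest zero run to '::'.
import ipaddress
packed = bytes.fromhex('20010db8000000000000000000000001')
print(ipaddress.IPv6Address(packed))  # 2001:db8::1
```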
def initialize_sentry_integration(): # pragma: no cover
"""\
Used to optionally initialize the Sentry service with this app.
See https://docs.sentry.io/platforms/python/pyramid/
"""
# This function is not under coverage because it is boilerplate
# from the Sentry documentation.
try:
import sentry_sdk
from sentry_sdk.integrations.pyramid import PyramidIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
except ImportError:
warnings.warn(
"Sentry is not configured because the Sentry SDK "
"(sentry_sdk package) is not installed",
UserWarning,
)
return # bail out early
try:
dsn = os.environ['SENTRY_DSN']
except KeyError:
warnings.warn(
"Sentry is not configured because SENTRY_DSN "
"was not supplied.",
UserWarning,
)
else:
sentry_sdk.init(
dsn=dsn,
integrations=[PyramidIntegration(), CeleryIntegration()],
) | \
Used to optionally initialize the Sentry service with this app.
See https://docs.sentry.io/platforms/python/pyramid/ |
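A minimal usage sketch, assuming a Pyramid-style app factory; the DSN value below is a placeholder, not a real project key:

```python
import os

def main(global_config, **settings):
    # Placeholder DSN; a real value comes from your Sentry project settings.
    os.environ.setdefault('SENTRY_DSN', 'https://publicKey@o0.ingest.sentry.io/0')
    initialize_sentry_integration()  # warns and bails out if sentry_sdk is missing
    # ... continue building the Pyramid app ...
```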
def _parse_effects(self, effects_json=None):
# type: (dict) -> Any
"""Parse multiple effects from an effects(list) json."""
if isinstance(effects_json, list):
return [ValidatorEffect.parse(effect) for effect in effects_json]
elif isinstance(effects_json, dict):
return ValidatorEffect.parse(effects_json)
else:
raise Exception("The provided json, should be a list of valid effects, "
"or a single effect. Got '{}'".format(effects_json)) | Parse multiple effects from an effects(list) json. |
def query_invitations(cls, user, eager=False):
"""Get all invitations for given user."""
if eager:
eager = [Membership.group]
return cls.query_by_user(user, state=MembershipState.PENDING_USER,
eager=eager) | Get all invitations for given user. |
def _erase_vm_info(name):
'''
erase the information for a VM that we are destroying.
some sdb drivers (such as the SQLite driver we expect to use)
do not have a `delete` method, so if the delete fails, we have
to replace it with a blank entry.
'''
try:
# delete the machine record
vm_ = get_vm_info(name)
if vm_['machine']:
key = _build_machine_uri(vm_['machine'], vm_.get('cwd', '.'))
try:
__utils__['sdb.sdb_delete'](key, __opts__)
except KeyError:
# no delete method found -- load a blank value
__utils__['sdb.sdb_set'](key, None, __opts__)
except Exception:
pass
uri = _build_sdb_uri(name)
try:
# delete the name record
__utils__['sdb.sdb_delete'](uri, __opts__)
except KeyError:
# no delete method found -- load an empty dictionary
__utils__['sdb.sdb_set'](uri, {}, __opts__)
except Exception:
pass | erase the information for a VM that we are destroying.
some sdb drivers (such as the SQLite driver we expect to use)
do not have a `delete` method, so if the delete fails, we have
to replace it with a blank entry. |
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz | Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization. |
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs):
# type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]]
"""Read a list of rows in dict or list form from a csv. (The headers argument is either a row
number or list of row numbers (in case of multi-line headers) to be considered as headers
(rows start counting at 1), or the actual headers defined as a list of strings. If not set,
all rows will be treated as containing values.)
Args:
filepath (str): Path to read from
dict_form (bool): Return in dict form. Defaults to False.
headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
**kwargs: Other arguments to pass to Tabulator Stream
Returns:
List[Union[Dict, List]]: List of rows in dict or list form
"""
stream = Stream(filepath, headers=headers, **kwargs)
stream.open()
result = stream.read(keyed=dict_form)
stream.close()
return result | Read a list of rows in dict or list form from a csv. (The headers argument is either a row
number or list of row numbers (in case of multi-line headers) to be considered as headers
(rows start counting at 1), or the actual headers defined as a list of strings. If not set,
all rows will be treated as containing values.)
Args:
filepath (str): Path to read from
dict_form (bool): Return in dict form. Defaults to False.
headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
**kwargs: Other arguments to pass to Tabulator Stream
Returns:
List[Union[Dict, List]]: List of rows in dict or list form |
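A usage sketch, assuming the tabulator package that backs `Stream` is installed; the CSV content is illustrative:

```python
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tmp:
    tmp.write('name,value\na,1\nb,2\n')

rows = read_list_from_csv(tmp.name, dict_form=True, headers=1)
# e.g. [{'name': 'a', 'value': '1'}, {'name': 'b', 'value': '2'}]
# (tabulator may type-cast values depending on its version and options)
```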
def _mkpda(self, nonterms, productions, productions_struct, terminals, splitstring=1):
"""
This function generates a PDA from a CNF grammar as described in:
- http://www.oit.edu/faculty/sherry.yang/CST229/Lectures/7_pda.pdf
- http://www.eng.utah.edu/~cs3100/lectures/l18/pda-notes.pdf
If all of the grammar productions are in the Chomsky Normal Form,
then follow the template for constructing a pushdown automaton:
1. Start
2. Push S
3. Pop
4. Case:
Nonterminal A: For every production rule of this form: A: BC, Push C and then Push B
Args:
nonterms (list): Non terminals list
productions (dict): productions in the CNF form:
A -> a or A -> b0b1, or S -> e
productions_struct (dict): productions in the CNF form in structure form
object.a for A -> a,
object.b0 and object.b1 for A -> b0b1
and object.type where type is
1 for A-->a and 2 for A-->b0b1
terminals (list): All terminals
splitstring (bool): If enabled an extra space is added after each symbol.
Returns:
PDA: The generated PDA
"""
pda = PDA(self.alphabet)
pda.nonterminals = nonterms
pda.terminals = terminals
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
pda.s[pda.n].sym = '@closing'
pda.s[pda.n].type = 1
pda.s[pda.n].trans[1] = [0]
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
pda.s[pda.n].type = 1
pda.s[pda.n].sym = nonterms[0]
pda.s[pda.n].trans[2] = [0]
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
pda.s[pda.n].type = 2
pda.s[pda.n].trans[0] = ['@closing']
counter = 0
i = 0
while i < len(nonterms):
j = 0
while j < len(productions[nonterms[i]]):
if productions_struct[counter].type == 1:
# ADD AND CONNECT STATE
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
if pda.n not in pda.s[2].trans:
pda.s[2].trans[pda.n] = []
pda.s[2].trans[pda.n].append(nonterms[i])
if splitstring == 0:
# FILL NEW STATE READ
pda.s[pda.n].type = 3
pda.s[pda.n].trans[2] = [productions_struct[counter].a]
else:
# THE FOLLOWING SWITCH IS DUE TO THE REQUIREMENT OF
# HAVING STRINGS SPLIT INTO SYMBOLS THAT CAN INTERSECT
# WITH A DFA
if productions_struct[counter].a not in terminals or \
len(productions_struct[counter].a) == 1:
# FILL NEW STATE READ
pda.s[pda.n].type = 3
pda.s[pda.n].trans[pda.n + 1] = [productions_struct[counter].a.lower()]
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
pda.s[pda.n].type = 3
pda.s[pda.n].trans[2] = [' ']
else:
pda.s[pda.n].type = 3
pda.s[pda.n].trans[pda.n + 1] = \
[productions_struct[counter].a[0].lower()]
k = 1
while k < len(productions_struct[counter].a) - 1:
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
pda.s[pda.n].type = 3
pda.s[pda.n].trans[pda.n + 1] = \
[productions_struct[counter].a[k].lower()]
k = k + 1
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
pda.s[pda.n].type = 3
pda.s[pda.n].trans[pda.n + 1] = \
[productions_struct[counter].a[-1].lower()]
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
pda.s[pda.n].type = 3
pda.s[pda.n].trans[2] = [' ']
else:
# ADD AND CONNECT PUSH STATE
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
if pda.n not in pda.s[2].trans:
pda.s[2].trans[pda.n] = []
pda.s[2].trans[pda.n].append(nonterms[i])
# FILL NEW STATE
pda.s[pda.n].type = 1
pda.s[pda.n].sym = productions_struct[counter].b1
pda.s[pda.n].trans[(pda.n) + 1] = [0]
# ADD AND CONNECT PUSH STATE (ALREADY CONNECTED)
pda.n = pda.n + 1
pda.s[pda.n] = PDAState()
pda.s[pda.n].id = pda.n
# FILL NEW STATE
pda.s[pda.n].type = 1
pda.s[pda.n].sym = productions_struct[counter].b0
pda.s[pda.n].trans[2] = [0]
j = j + 1
counter = counter + 1
i = i + 1
return pda | This function generates a PDA from a CNF grammar as described in:
- http://www.oit.edu/faculty/sherry.yang/CST229/Lectures/7_pda.pdf
- http://www.eng.utah.edu/~cs3100/lectures/l18/pda-notes.pdf
If all of the grammar productions are in the Chomsky Normal Form,
then follow the template for constructing a pushdown automaton:
1. Start
2. Push S
3. Pop
4. Case:
Nonterminal A: For every production rule of this form: A: BC, Push C and then Push B
Args:
nonterms (list): Non terminals list
productions (dict): productions in the CNF form:
A -> a or A -> b0b1, or S -> e
productions_struct (dict): productions in the CNF form in structure form
object.a for A -> a,
object.b0 and object.b1 for A -> b0b1
and object.type where type is
1 for A-->a and 2 for A-->b0b1
terminals (list): All terminals
splitstring (bool): If enabled an extra space is added after each symbol.
Returns:
PDA: The generated PDA |
def early_create_objects(self, raw_objects):
"""Create the objects needed for the post configuration file initialization
:param raw_objects: dict with all objects with str values
:type raw_objects: dict
:return: None
"""
types_creations = self.__class__.types_creations
early_created_types = self.__class__.early_created_types
logger.info("Creating objects...")
for o_type in sorted(types_creations):
if o_type in early_created_types:
self.create_objects_for_type(raw_objects, o_type)
logger.info("Done") | Create the objects needed for the post configuration file initialization
:param raw_objects: dict with all objects with str values
:type raw_objects: dict
:return: None |
def _attach(self, instruction, qargs, cargs):
"""DEPRECATED after 0.8"""
self.append(instruction, qargs, cargs) | DEPRECATED after 0.8 |
def pitch(self, shift,
use_tree=False,
segment=82,
search=14.68,
overlap=12):
"""pitch takes 4 parameters: user_tree (True or False), segment, search
and overlap."""
self.command.append("pitch")
if use_tree:
self.command.append('-q')
self.command.append(shift)
self.command.append(segment)
self.command.append(search)
self.command.append(overlap)
return self | pitch takes 4 parameters: use_tree (True or False), segment, search
and overlap. |
def p_property_decl(self, p):
""" property_decl : prop_open style_list t_semicolon
| prop_open style_list css_important t_semicolon
| prop_open empty t_semicolon
"""
l = len(p)
p[0] = Property(list(p)[1:-1], p.lineno(l - 1)) | property_decl : prop_open style_list t_semicolon
| prop_open style_list css_important t_semicolon
| prop_open empty t_semicolon |
def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):
"""Run EMBOSS pepstats on a FASTA file.
Args:
infile: Path to FASTA file
outfile: Name of output file without extension
outdir: Path to output directory
outext: Extension of results file, default is ".pepstats"
force_rerun: Flag to rerun pepstats
Returns:
str: Path to output file.
"""
# Create the output file name
outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile, outdir=outdir, outext=outext)
# Run pepstats
program = 'pepstats'
pepstats_args = '-sequence="{}" -outfile="{}"'.format(infile, outfile)
cmd_string = '{} {}'.format(program, pepstats_args)
ssbio.utils.command_runner(cmd_string, force_rerun_flag=force_rerun, outfile_checker=outfile, silent=True)
return outfile | Run EMBOSS pepstats on a FASTA file.
Args:
infile: Path to FASTA file
outfile: Name of output file without extension
outdir: Path to output directory
outext: Extension of results file, default is ".pepstats"
force_rerun: Flag to rerun pepstats
Returns:
str: Path to output file. |
def query_by_post(postid):
'''
Query records by post.
'''
return TabPost2Tag.select().where(
TabPost2Tag.post_id == postid
).order_by(TabPost2Tag.order) | Query records by post. |
def prepare(self, engine, mode, items) -> None:
"""
Create a unique transaction id and dumps the items into a cached request object.
"""
self.tx_id = str(uuid.uuid4()).replace("-", "")
self.engine = engine
self.mode = mode
self.items = items
self._prepare_request() | Create a unique transaction id and dumps the items into a cached request object. |
def user_order_by(self, field):
"""
Queryset method ordering objects by user ordering field.
"""
# Get ordering model.
model_label = order.utils.resolve_labels('.'.join(\
[self.model._meta.app_label, self.model._meta.object_name]))
orderitem_set = getattr(self.model, \
order.utils.resolve_order_item_related_set_name(model_label))
order_model = orderitem_set.related.model
# Resolve ordering model table name.
db_table = order_model._meta.db_table
# Add ordering field as extra queryset fields.
pk_name = self.model._meta.pk.attname
# If we have a descending query, remove '-' from the field name when querying.
sanitized_field = field.lstrip('-')
extra_select = {
sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \
(sanitized_field, db_table, self.model._meta.db_table, pk_name)
}
# Use original field name when ordering to allow for descending.
return self.extra(select=extra_select).all().order_by(field) | Queryset method ordering objects by user ordering field. |
def get_total_DOS(self):
"""Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double'
"""
warnings.warn("Phonopy.get_total_DOS is deprecated. "
"Use Phonopy.get_total_dos_dict.", DeprecationWarning)
dos = self.get_total_dos_dict()
return dos['frequency_points'], dos['total_dos'] | Return frequency points and total DOS as a tuple.
Returns
-------
A tuple with (frequency_points, total_dos).
frequency_points: ndarray
shape=(frequency_sampling_points, ), dtype='double'
total_dos:
shape=(frequency_sampling_points, ), dtype='double' |
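A migration sketch following the deprecation warning; `phonon` stands in for an already-configured Phonopy instance with a completed mesh calculation:

```python
dos = phonon.get_total_dos_dict()
frequency_points = dos['frequency_points']
total_dos = dos['total_dos']
```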
def suspendMember(self, clusterId, memberId):
"""
Parameters:
- clusterId
- memberId
"""
self.send_suspendMember(clusterId, memberId)
return self.recv_suspendMember() | Parameters:
- clusterId
- memberId |
def error(self):
"""
Class property: Sum of the squared errors,
:math:`E = \sum_i (D_i - M_i(\\theta))^2`
"""
r = self.residuals.ravel()
return np.dot(r,r) | Class property: Sum of the squared errors,
:math:`E = \sum_i (D_i - M_i(\\theta))^2` |
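A tiny numeric check of the identity the property relies on: for a flattened residual array `r`, `np.dot(r, r)` is exactly the sum of squared errors:

```python
import numpy as np

r = np.array([[1.0, -2.0], [0.5, 0.0]]).ravel()
E = np.dot(r, r)  # 1.0 + 4.0 + 0.25 + 0.0 == 5.25
```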
def _transform_index(index, func, level=None):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(index, MultiIndex):
if level is not None:
items = [tuple(func(y) if i == level else y
for i, y in enumerate(x)) for x in index]
else:
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name, tupleize_cols=False) | Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified. |
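Since `_transform_index` is a pandas internal, here is the same level-aware logic re-created on public APIs:

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)], names=['key', 'num'])
# Transform only level 0, as _transform_index(idx, str.upper, level=0) would:
items = [tuple(str.upper(y) if i == 0 else y for i, y in enumerate(x)) for x in idx]
print(pd.MultiIndex.from_tuples(items, names=idx.names))  # [('A', 1), ('B', 2)]
```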
def teardown(self):
"""Cleanup cache tables."""
for table_spec in reversed(self._table_specs):
with self._conn:
table_spec.teardown(self._conn) | Cleanup cache tables. |
def get_ssh_client(ip,
ssh_private_key_file,
ssh_user='root',
port=22,
timeout=600,
wait_period=10):
"""Attempt to establish and test ssh connection."""
if ip in CLIENT_CACHE:
return CLIENT_CACHE[ip]
start = time.time()
end = start + timeout
client = None
while time.time() < end:
try:
client = establish_ssh_connection(
ip,
ssh_private_key_file,
ssh_user,
port,
timeout=wait_period
)
execute_ssh_command(client, 'ls')
except: # noqa: E722
if client:
client.close()
wait_period += wait_period  # double the wait between attempts (simple backoff)
else:
CLIENT_CACHE[ip] = client
return client
raise IpaSSHException(
'Attempt to establish SSH connection failed.'
) | Attempt to establish and test ssh connection. |
def list_extmods():
'''
.. versionadded:: 2017.7.0
List Salt modules which have been synced externally
CLI Examples:
.. code-block:: bash
salt '*' saltutil.list_extmods
'''
ret = {}
ext_dir = os.path.join(__opts__['cachedir'], 'extmods')
mod_types = os.listdir(ext_dir)
for mod_type in mod_types:
ret[mod_type] = set()
for _, _, files in salt.utils.path.os_walk(os.path.join(ext_dir, mod_type)):
for fh_ in files:
ret[mod_type].add(fh_.split('.')[0])
ret[mod_type] = list(ret[mod_type])
return ret | .. versionadded:: 2017.7.0
List Salt modules which have been synced externally
CLI Examples:
.. code-block:: bash
salt '*' saltutil.list_extmods |
def get_magnitude_scaling_term(self, C, mag):
"""
Returns the magnitude scaling term (equation 1)
"""
mval = mag - 3.0
return C['b1'] + C['b2'] * mval + C['b3'] * (mval ** 2.0) +\
C['b4'] * (mval ** 3.0) | Returns the magnitude scaling term (equation 1) |
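A worked example of equation 1 with made-up coefficients; real `b1`..`b4` values come from the GMPE's coefficient table:

```python
C = {'b1': 1.0, 'b2': 0.5, 'b3': -0.04, 'b4': 0.002}
mval = 6.0 - 3.0  # mag - 3.0
term = C['b1'] + C['b2'] * mval + C['b3'] * mval ** 2.0 + C['b4'] * mval ** 3.0
# 1.0 + 1.5 - 0.36 + 0.054 == 2.194
```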
def get_scope_names(self) -> list:
"""
Return the list of all contained scope from global to local
"""
# allow global scope to have an None string instance
lscope = []
for scope in reversed(self.get_scope_list()):
if scope.name is not None:
# handle fun/block scope decoration
lscope.append(scope.name)
return lscope | Return the list of all contained scope from global to local |
def reads_overlapping_variant(
samfile,
variant,
chromosome=None,
use_duplicate_reads=USE_DUPLICATE_READS,
use_secondary_alignments=USE_SECONDARY_ALIGNMENTS,
min_mapping_quality=MIN_READ_MAPPING_QUALITY):
"""
Find reads in the given SAM/BAM file which overlap the given variant and
return them as a list of AlleleRead objects.
Parameters
----------
samfile : pysam.AlignmentFile
variant : varcode.Variant
chromosome : str
use_duplicate_reads : bool
Should we use reads that have been marked as PCR duplicates
use_secondary_alignments : bool
Should we use reads at locations other than their best alignment
min_mapping_quality : int
Drop reads below this mapping quality
Returns sequence of AlleleRead objects.
"""
logger.info("Gathering reads for %s", variant)
if chromosome is None:
chromosome = variant.contig
logger.info(
"Gathering variant reads for variant %s (chromosome = %s, gene names = %s)",
variant,
chromosome,
variant.gene_names)
base1_position, ref, alt = trim_variant(variant)
if len(ref) == 0:
# if the variant is an insertion
base1_position_before_variant = base1_position
base1_position_after_variant = base1_position + 1
else:
base1_position_before_variant = base1_position - 1
base1_position_after_variant = base1_position + len(ref)
locus_reads = locus_read_generator(
samfile=samfile,
chromosome=chromosome,
base1_position_before_variant=base1_position_before_variant,
base1_position_after_variant=base1_position_after_variant,
use_duplicate_reads=use_duplicate_reads,
use_secondary_alignments=use_secondary_alignments,
min_mapping_quality=min_mapping_quality)
allele_reads = allele_reads_from_locus_reads(
locus_reads=locus_reads,
n_ref=len(ref))
return allele_reads | Find reads in the given SAM/BAM file which overlap the given variant and
return them as a list of AlleleRead objects.
Parameters
----------
samfile : pysam.AlignmentFile
variant : varcode.Variant
chromosome : str
use_duplicate_reads : bool
Should we use reads that have been marked as PCR duplicates
use_secondary_alignments : bool
Should we use reads at locations other than their best alignment
min_mapping_quality : int
Drop reads below this mapping quality
Returns sequence of AlleleRead objects. |
def exec_python(*args, **kwargs):
"""
Wrap running python script in a subprocess.
Return stdout of the invoked command.
"""
cmdargs, kwargs = __wrap_python(args, kwargs)
return exec_command(*cmdargs, **kwargs) | Wrap running python script in a subprocess.
Return stdout of the invoked command. |
def frameAndSave(abf,tag="",dataType="plot",saveAsFname=False,closeWhenDone=True):
"""
frame the current matplotlib plot with ABF info, and optionally save it.
Note that this is entirely independent of the ABFplot class object.
if saveImage is False, show it instead.
Datatype should be:
* plot
* experiment
"""
print("closeWhenDone",closeWhenDone)
plt.tight_layout()
plt.subplots_adjust(top=.93,bottom =.07)
plt.annotate(tag,(.01,.99),xycoords='figure fraction',ha='left',va='top',family='monospace',size=10,alpha=.5)
msgBot="%s [%s]"%(abf.ID,abf.protocomment)
plt.annotate(msgBot,(.01,.01),xycoords='figure fraction',ha='left',va='bottom',family='monospace',size=10,alpha=.5)
fname=tag.lower().replace(" ",'_')+".jpg"
fname=dataType+"_"+fname
plt.tight_layout()
if IMAGE_SAVE:
abf.log.info("saving [%s]",fname)
try:
if saveAsFname:
saveAs=os.path.abspath(saveAsFname)
else:
saveAs=os.path.abspath(abf.outPre+fname)
if not os.path.exists(abf.outFolder):
os.mkdir(abf.outFolder)
plt.savefig(saveAs)
except Exception as E:
abf.log.error("saving [%s] failed! 'pip install pillow'?",fname)
print(E)
if IMAGE_SHOW==True:
if closeWhenDone==False:
print("NOT SHOWING (because closeWhenDone==True and showing would mess things up)")
else:
abf.log.info("showing [%s]",fname)
plt.show()
if closeWhenDone:
print("closing figure")
plt.close('all') | frame the current matplotlib plot with ABF info, and optionally save it.
Note that this is entirely independent of the ABFplot class object.
if saveImage is False, show it instead.
Datatype should be:
* plot
* experiment |
def run_command(cmd, debug=False):
"""
Execute the given command and return None.
:param cmd: A `sh.Command` object to execute.
:param debug: An optional bool to toggle debug output.
:return: None
"""
if debug:
msg = ' PWD: {}'.format(os.getcwd())
print_warn(msg)
msg = ' COMMAND: {}'.format(cmd)
print_warn(msg)
cmd() | Execute the given command and return None.
:param cmd: A `sh.Command` object to execute.
:param debug: An optional bool to toggle debug output.
:return: None |
def GetScripts(self, dest_dir):
"""Retrieve the scripts to execute.
Args:
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
dict, a dictionary mapping set metadata keys with associated scripts.
"""
metadata_dict = self.watcher.GetMetadata() or {}
try:
instance_data = metadata_dict['instance']['attributes']
except KeyError:
instance_data = None
self.logger.warning('Instance attributes were not found.')
try:
project_data = metadata_dict['project']['attributes']
except KeyError:
project_data = None
self.logger.warning('Project attributes were not found.')
return (self._GetAttributeScripts(instance_data, dest_dir)
or self._GetAttributeScripts(project_data, dest_dir)) | Retrieve the scripts to execute.
Args:
dest_dir: string, the path to a directory for storing metadata scripts.
Returns:
dict, a dictionary mapping set metadata keys with associated scripts. |
def get_variant_phenotypes_with_suggested_changes(variant_id_list):
'''for each variant, yields evidence and associated phenotypes, both current and suggested'''
variants = civic.get_variants_by_ids(variant_id_list)
evidence = list()
for variant in variants:
evidence.extend(variant.evidence)
for e in evidence:
suggested_changes_url = f'https://civicdb.org/api/evidence_items/{e.id}/suggested_changes'
resp = requests.get(suggested_changes_url)
resp.raise_for_status()
suggested_changes = dict()
for suggested_change in resp.json():
pheno_changes = suggested_change['suggested_changes'].get('phenotype_ids', None)
if pheno_changes is None:
continue
a, b = pheno_changes
added = set(b) - set(a)
deleted = set(a) - set(b)
rid = suggested_change['id']
suggested_changes[rid] = {'added': added, 'deleted': deleted}
yield e, {'suggested_changes': suggested_changes, 'current': set([x.id for x in e.phenotypes])} | for each variant, yields evidence and associated phenotypes, both current and suggested |
def load_edgegrid_client_settings():
'''Load Akamai EdgeGrid configuration
returns a (hostname, EdgeGridAuth) tuple from the following locations:
1. Values specified directly in the Django settings::
AKAMAI_CCU_CLIENT_SECRET
AKAMAI_CCU_HOST
AKAMAI_CCU_ACCESS_TOKEN
AKAMAI_CCU_CLIENT_TOKEN
2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings
3. The default ~/.edgerc file
Both edgerc file load options will return the values from the “CCU” section
by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting.
'''
if getattr(settings, 'AKAMAI_CCU_CLIENT_SECRET', None):
# If the settings module has the values directly and they are not empty
# we'll use them without checking for an edgerc file:
host = settings.AKAMAI_CCU_HOST
auth = EdgeGridAuth(access_token=settings.AKAMAI_CCU_ACCESS_TOKEN,
client_token=settings.AKAMAI_CCU_CLIENT_TOKEN,
client_secret=settings.AKAMAI_CCU_CLIENT_SECRET)
return host, auth
else:
edgerc_section = getattr(settings, 'AKAMAI_EDGERC_CCU_SECTION', 'CCU')
edgerc_path = getattr(settings, 'AKAMAI_EDGERC_FILENAME', '~/.edgerc')
edgerc_path = os.path.expanduser(edgerc_path)
if os.path.isfile(edgerc_path):
edgerc = EdgeRc(edgerc_path)
host = edgerc.get(edgerc_section, 'host')
auth = EdgeGridAuth.from_edgerc(edgerc, section=edgerc_section)
return host, auth
raise InvalidAkamaiConfiguration('Cannot find Akamai client configuration!') | Load Akamai EdgeGrid configuration
returns a (hostname, EdgeGridAuth) tuple from the following locations:
1. Values specified directly in the Django settings::
AKAMAI_CCU_CLIENT_SECRET
AKAMAI_CCU_HOST
AKAMAI_CCU_ACCESS_TOKEN
AKAMAI_CCU_CLIENT_TOKEN
2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings
3. The default ~/.edgerc file
Both edgerc file load options will return the values from the “CCU” section
by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting. |
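A sketch of option 1 above, with the four settings wired directly into Django settings; every value is a placeholder:

```python
AKAMAI_CCU_HOST = 'akab-xxxxxxxx.luna.akamaiapis.net'      # placeholder
AKAMAI_CCU_CLIENT_TOKEN = 'akab-client-token-placeholder'
AKAMAI_CCU_CLIENT_SECRET = 'client-secret-placeholder'
AKAMAI_CCU_ACCESS_TOKEN = 'akab-access-token-placeholder'
```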
def _restart_target(self):
"""
Restart our Target.
"""
if self._server:
if self._server.returncode is None:
self._server.kill()
time.sleep(0.2)
self._server = subprocess.Popen("python session_server.py", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
time.sleep(0.2) | Restart our Target. |
def end_task(self):
'''
Remove the current task from the stack.
'''
self.progress(self.task_stack[-1].size)
self.task_stack.pop() | Remove the current task from the stack. |
def get_navigation(request):
""" Returns the rendered navigation block. Requires that the
`navigation.html` template exists. Two context variables are passed to it:
* sections (see :func:`get_breadcrumb_sections`)
* trail (see :func:`get_breadcrumb_trail`)
"""
sections = _get_sections(request)
trail = _get_trail(request, exclude_section=True)
return mark_safe(render_to_string('navigation.html',
dict(sections=sections,trail=trail))) | Returns the rendered navigation block. Requires that the
`navigation.html` template exists. Two context variables are passed to it:
* sections (see :func:`get_breadcrumb_sections`)
* trail (see :func:`get_breadcrumb_trail`) |
def _wait_for_job_done(self, project_id, job_id, interval=30):
"""
Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job reaches
a terminal state.
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned when getting
the job
"""
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval) | Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job reaches
a terminal state.
Raises:
googleapiclient.errors.HttpError: if HTTP error is returned when getting
the job |
def freeze(self, progressbar=None):
"""
Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`.
"""
# Call the storage broker pre_freeze hook.
self._storage_broker.pre_freeze_hook()
if progressbar:
progressbar.label = "Freezing dataset"
# Generate and persist the manifest.
manifest = self.generate_manifest(progressbar=progressbar)
self._storage_broker.put_manifest(manifest)
# Generate and persist overlays from any item metadata that has been
# added.
overlays = self._generate_overlays()
for overlay_name, overlay in overlays.items():
self._put_overlay(overlay_name, overlay)
# Change the type of the dataset from "protodataset" to "dataset" and
# add a "frozen_at" time stamp to the administrative metadata.
datetime_obj = datetime.datetime.utcnow()
metadata_update = {
"type": "dataset",
"frozen_at": dtoolcore.utils.timestamp(datetime_obj)
}
self._admin_metadata.update(metadata_update)
self._storage_broker.put_admin_metadata(self._admin_metadata)
# Clean up using the storage broker's post freeze hook.
self._storage_broker.post_freeze_hook() | Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`. |
def has_hlu(self, lun_or_snap, cg_member=None):
"""Returns True if `lun_or_snap` is attached to the host.
:param lun_or_snap: can be lun, lun snap, cg snap or a member snap of
cg snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: True - if `lun_or_snap` is attached, otherwise False.
"""
hlu = self.get_hlu(lun_or_snap, cg_member=cg_member)
return hlu is not None | Returns True if `lun_or_snap` is attached to the host.
:param lun_or_snap: can be lun, lun snap, cg snap or a member snap of
cg snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: True - if `lun_or_snap` is attached, otherwise False. |
def initialize_workflow(self, workflow):
"""
Create a workflow
workflow - a workflow class
"""
self.workflow = workflow()
self.workflow.tasks = self.tasks
self.workflow.input_file = self.input_file
self.workflow.input_format = self.input_format
self.workflow.target_file = self.target_file
self.workflow.target_format = self.target_format
self.workflow.run_id = self.run_id
self.workflow.setup() | Create a workflow
workflow - a workflow class |
def acm_certificate_arn(self, lookup, default=None):
"""
Args:
lookup: region/domain on the certificate to be looked up
default: the optional value to return if lookup failed; returns None if not set
Returns:
ARN of a certificate with status "Issued" for the region/domain, if found, or default/None if no match
If more than one "Issued" certificate matches the region/domain:
- if any matching cert was issued by Amazon, returns ARN of certificate with most recent IssuedAt timestamp
- if no certs were issued by Amazon, returns ARN of an arbitrary matching certificate
- certificates issued by Amazon take precedence over certificates not issued by Amazon
"""
# @todo: Only searches the first 100 certificates in the account
try:
# This a region-specific client, so we'll make a new client in the right place using existing SESSION
region_name, domain_name = lookup.split("/")
acm_client = EFAwsResolver.__CLIENTS["SESSION"].client(service_name="acm", region_name=region_name)
response = acm_client.list_certificates(
CertificateStatuses=['ISSUED'],
MaxItems=100
)
except Exception:
return default
# No certificates
if len(response["CertificateSummaryList"]) < 1:
return default
# One or more certificates - find cert with latest IssuedAt date or an arbitrary cert if none are dated
best_match_cert = None
for cert_handle in response["CertificateSummaryList"]:
if cert_handle["DomainName"] == domain_name:
cert = acm_client.describe_certificate(CertificateArn=cert_handle["CertificateArn"])["Certificate"]
# Patch up cert if there is no IssuedAt (i.e. cert was not issued by Amazon)
if "IssuedAt" not in cert:
cert[u"IssuedAt"] = datetime.datetime(1970, 1, 1, 0, 0)
if best_match_cert is None:
best_match_cert = cert
elif cert["IssuedAt"] > best_match_cert["IssuedAt"]:
best_match_cert = cert
if best_match_cert is not None:
return best_match_cert["CertificateArn"]
return default | Args:
lookup: region/domain on the certificate to be looked up
default: the optional value to return if lookup failed; returns None if not set
Returns:
ARN of a certificate with status "Issued" for the region/domain, if found, or default/None if no match
If more than one "Issued" certificate matches the region/domain:
- if any matching cert was issued by Amazon, returns ARN of certificate with most recent IssuedAt timestamp
- if no certs were issued by Amazon, returns ARN of an arbitrary matching certificate
- certificates issued by Amazon take precedence over certificates not issued by Amazon |
def safe_popen(*args, **kwargs):
'''This wrapper works around two major deadlock issues to do with pipes.
The first is that, before Python 3.2 on POSIX systems, os.pipe() creates
inheritable file descriptors, which leak to all child processes and prevent
reads from reaching EOF. The workaround for this is to set close_fds=True
on POSIX, which was not the default in those versions. See PEP 0446 for
many details.
The second issue arises on Windows, where we're not allowed to set
close_fds=True while also setting stdin/stdout/stderr. Descriptors from
os.pipe() on Windows have never been inheritable, so it would seem that
we're safe. However, the Windows implementation of subprocess.Popen()
creates temporary inheritable copies of its descriptors, and these can
leak. The workaround for this is to protect Popen() with a global lock. See
https://bugs.python.org/issue25565.'''
close_fds = (os.name != 'nt')
with popen_lock:
return subprocess.Popen(*args, close_fds=close_fds, **kwargs) | This wrapper works around two major deadlock issues to do with pipes.
The first is that, before Python 3.2 on POSIX systems, os.pipe() creates
inheritable file descriptors, which leak to all child processes and prevent
reads from reaching EOF. The workaround for this is to set close_fds=True
on POSIX, which was not the default in those versions. See PEP 0446 for
many details.
The second issue arises on Windows, where we're not allowed to set
close_fds=True while also setting stdin/stdout/stderr. Descriptors from
os.pipe() on Windows have never been inheritable, so it would seem that
we're safe. However, the Windows implementation of subprocess.Popen()
creates temporary inheritable copies of its descriptors, and these can
leak. The workaround for this is to protect Popen() with a global lock. See
https://bugs.python.org/issue25565. |
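A usage sketch including the module-level `popen_lock` the function assumes but does not define here:

```python
import os
import subprocess
import sys
import threading

popen_lock = threading.Lock()  # serializes Popen calls (the Windows workaround)

proc = safe_popen([sys.executable, '-c', 'print("hi")'], stdout=subprocess.PIPE)
out, _ = proc.communicate()
print(out)  # b'hi\n'
```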
def minter(record_uuid, data, pid_type, key):
"""Mint PIDs for a record."""
pid = PersistentIdentifier.create(
pid_type,
data[key],
object_type='rec',
object_uuid=record_uuid,
status=PIDStatus.REGISTERED
)
for scheme, identifier in data['identifiers'].items():
if identifier:
PersistentIdentifier.create(
scheme,
identifier,
object_type='rec',
object_uuid=record_uuid,
status=PIDStatus.REGISTERED
)
return pid | Mint PIDs for a record. |
def from_context(cls):
"""Retrieve this class' instance from the current Click context.
:return: Instance of this class.
:rtype: Config
"""
try:
ctx = click.get_current_context()
except RuntimeError:
return cls()
return ctx.find_object(cls) | Retrieve this class' instance from the current Click context.
:return: Instance of this class.
:rtype: Config |
def _add_timeout_handler(self, handler):
"""Add a `TimeoutHandler` to the main loop."""
# pylint: disable-msg=W0212
now = time.time()
for dummy, method in inspect.getmembers(handler, callable):
if not hasattr(method, "_pyxmpp_timeout"):
continue
self._timeout_handlers.append((now + method._pyxmpp_timeout,
method))
self._timeout_handlers.sort(key = lambda x: x[0]) | Add a `TimeoutHandler` to the main loop. |
def list_not_state(subset=None, show_ip=False, show_ipv4=None):
'''
.. versionadded:: 2015.8.0
.. versionchanged:: 2019.2.0
The 'show_ipv4' argument has been renamed to 'show_ip' as it now
includes IPv6 addresses for IPv6-connected minions.
Print a list of all minions that are NOT up according to Salt's presence
detection (no commands will be sent to minions)
subset : None
Pass in a CIDR range to filter minions by IP address.
show_ip : False
Also show the IP address each minion is connecting from.
CLI Example:
.. code-block:: bash
salt-run manage.list_not_state
'''
show_ip = _show_ip_migration(show_ip, show_ipv4)
connected = list_state(subset=None, show_ip=show_ip)
key = salt.key.get_key(__opts__)
keys = key.list_keys()
not_connected = []
for minion in keys[key.ACC]:
if minion not in connected and (subset is None or minion in subset):
not_connected.append(minion)
return not_connected | .. versionadded:: 2015.8.0
.. versionchanged:: 2019.2.0
The 'show_ipv4' argument has been renamed to 'show_ip' as it now
includes IPv6 addresses for IPv6-connected minions.
Print a list of all minions that are NOT up according to Salt's presence
detection (no commands will be sent to minions)
subset : None
Pass in a CIDR range to filter minions by IP address.
show_ip : False
Also show the IP address each minion is connecting from.
CLI Example:
.. code-block:: bash
salt-run manage.list_not_state |
def all(self, value, pos=None):
"""Return True if one or many bits are all set to value.
value -- If value is True then checks for bits set to 1, otherwise
checks for bits set to 0.
pos -- An iterable of bit positions. Negative numbers are treated in
the same way as slice indices. Defaults to the whole bitstring.
"""
value = bool(value)
length = self.len
if pos is None:
pos = xrange(self.len)
for p in pos:
if p < 0:
p += length
if not 0 <= p < length:
raise IndexError("Bit position {0} out of range.".format(p))
if self._datastore.getbit(p) is not value:
return False
return True | Return True if one or many bits are all set to value.
value -- If value is True then checks for bits set to 1, otherwise
checks for bits set to 0.
pos -- An iterable of bit positions. Negative numbers are treated in
the same way as slice indices. Defaults to the whole bitstring. |
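A behaviour sketch, assuming the bitstring package this method comes from:

```python
from bitstring import BitArray

b = BitArray('0b1101')
b.all(True, [0, 1, 3])  # True: bits 0, 1 and 3 are all set
b.all(True, [-2])       # False: -2 wraps to bit 2, which is 0
b.all(False)            # False: not every bit is zero
```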
def pack_bytes(self, obj_dict, encoding=None):
"""Pack a dictionary into a byte stream."""
assert self.dict_to_bytes or self.dict_to_string
encoding = encoding or self.default_encoding or 'utf-8'
LOGGER.debug('%r encoding dict with encoding %s', self, encoding)
if self.dict_to_bytes:
return None, self.dict_to_bytes(obj_dict)
try:
return encoding, self.dict_to_string(obj_dict).encode(encoding)
except LookupError as error:
raise web.HTTPError(
406, 'failed to encode result %r', error,
reason='target charset {0} not found'.format(encoding))
except UnicodeEncodeError as error:
LOGGER.warning('failed to encode text as %s - %s, trying utf-8',
encoding, str(error))
return 'utf-8', self.dict_to_string(obj_dict).encode('utf-8') | Pack a dictionary into a byte stream. |
def has(*permissions, **kwargs):
"""
Checks if the passed bearer has the passed permissions (optionally on
the passed target).
"""
target = kwargs['target']
kwargs['target'] = type_for(target)
# TODO: Predicate evaluation?
return target in filter_(*permissions, **kwargs) | Checks if the passed bearer has the passed permissions (optionally on
the passed target). |
def validate_base_url(base_url):
"""Verify that base_url specifies a protocol and network location."""
parsed_url = urllib.parse.urlparse(base_url)
if parsed_url.scheme and parsed_url.netloc:
return parsed_url.geturl()
else:
error_message = "base_url must contain a valid scheme (protocol " \
"specifier) and network location (hostname)"
raise ValueError(error_message) | Verify that base_url specifies a protocol and network location. |
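A behaviour sketch relying only on the stdlib urllib.parse semantics the function uses:

```python
validate_base_url('https://api.example.com/v1')  # -> 'https://api.example.com/v1'
validate_base_url('api.example.com')             # raises ValueError: no scheme/netloc
```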
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> convert_datetime('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> convert_datetime('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> convert_datetime('2007-02-31T23:06:20') is None
True
>>> convert_datetime('0000-00-00 00:00:00') is None
True
"""
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(obj)
try:
ymd, hms = obj.split(sep, 1)
usecs = '0'
if '.' in hms:
hms, usecs = hms.split('.')
usecs = float('0.' + usecs) * 1e6
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':')+[usecs] ])
except ValueError:
return convert_date(obj) | Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> convert_datetime('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> convert_datetime('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> convert_datetime('2007-02-31T23:06:20') is None
True
>>> convert_datetime('0000-00-00 00:00:00') is None
True |
def translate_basic(usercode):
""" Translate a basic color name to color with explanation. """
codenum = get_code_num(codes['fore'][usercode])
colorcode = codeformat(codenum)
msg = 'Name: {:>10}, Number: {:>3}, EscapeCode: {!r}'.format(
usercode,
codenum,
colorcode
)
if disabled():
return msg
return str(C(msg, fore=usercode)) | Translate a basic color name to color with explanation. |
def copy(self, sleep=_unset, stop=_unset, wait=_unset,
retry=_unset, before=_unset, after=_unset, before_sleep=_unset,
reraise=_unset):
"""Copy this object with some parameters changed if needed."""
if before_sleep is _unset:
before_sleep = self.before_sleep
return self.__class__(
sleep=self.sleep if sleep is _unset else sleep,
stop=self.stop if stop is _unset else stop,
wait=self.wait if wait is _unset else wait,
retry=self.retry if retry is _unset else retry,
before=self.before if before is _unset else before,
after=self.after if after is _unset else after,
before_sleep=before_sleep,
reraise=self.reraise if reraise is _unset else reraise,
) | Copy this object with some parameters changed if needed. |
def writeRecord(self, infile):
"""Writes to _infile_ the original contents of the Record. This is intended for use by [RecordCollections](./RecordCollection.html#metaknowledge.RecordCollection) to write to file. What is written to _infile_ is bit for bit identical to the original record file (if utf-8 is used). No newline is inserted above the write but the last character is a newline.
# Parameters
_infile_ : `file stream`
> An open utf-8 encoded file
"""
if self.bad:
raise BadWOSRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile))
else:
for tag in self._fieldDict.keys():
for i, value in enumerate(self._fieldDict[tag]):
if i == 0:
infile.write(tag + ' ')
else:
infile.write(' ')
infile.write(value + '\n')
infile.write("ER\n") | Writes to _infile_ the original contents of the Record. This is intended for use by [RecordCollections](./RecordCollection.html#metaknowledge.RecordCollection) to write to file. What is written to _infile_ is bit for bit identical to the original record file (if utf-8 is used). No newline is inserted above the write but the last character is a newline.
# Parameters
_infile_ : `file stream`
> An open utf-8 encoded file |
def patch_all():
"""
Runs all patches. This function ensures that a second invocation has no effect.
"""
global _patched
if _patched:
return
_patched = True
patch_default_retcodes()
patch_worker_run_task()
patch_worker_factory()
patch_keepalive_run()
patch_cmdline_parser()
logger.debug("applied law-specific luigi patches") | Runs all patches. This function ensures that a second invocation has no effect. |
def UpdateManifestResourcesFromXML(dstpath, xmlstr, names=None,
languages=None):
""" Update or add manifest XML as resource in dstpath """
logger.info("Updating manifest in %s", dstpath)
if dstpath.lower().endswith(".exe"):
name = 1
else:
name = 2
winresource.UpdateResources(dstpath, xmlstr, RT_MANIFEST, names or [name],
languages or [0, "*"]) | Update or add manifest XML as resource in dstpath |
def assert_requirements(self):
""""Asserts PEP 508 specifiers."""
# Support for 508's implementation_version.
if hasattr(sys, 'implementation'):
implementation_version = format_full_version(sys.implementation.version)
else:
implementation_version = "0"
# Default to cpython for 2.7.
if hasattr(sys, 'implementation'):
implementation_name = sys.implementation.name
else:
implementation_name = 'cpython'
lookup = {
'os_name': os.name,
'sys_platform': sys.platform,
'platform_machine': platform.machine(),
'platform_python_implementation': platform.python_implementation(),
'platform_release': platform.release(),
'platform_system': platform.system(),
'platform_version': platform.version(),
'python_version': '.'.join(platform.python_version_tuple()[:2]),
'python_full_version': platform.python_version(),
'implementation_name': implementation_name,
'implementation_version': implementation_version
}
# Assert each specified requirement.
for marker, specifier in self.data['_meta']['requires'].items():
if marker in lookup:
try:
assert lookup[marker] == specifier
except AssertionError:
raise AssertionError('Specifier {!r} does not match {!r}.'.format(marker, specifier)) | Asserts PEP 508 specifiers. |
def redata(self, *args):
"""Make my data represent what's in my rulebook right now"""
if self.rulesview is None:
Clock.schedule_once(self.redata, 0)
return
data = [
{'rulesview': self.rulesview, 'rule': rule, 'index': i, 'ruleslist': self}
for i, rule in enumerate(self.rulebook)
]
self.data = data | Make my data represent what's in my rulebook right now |
def do_plot_and_bestfit(self):
"""
Create plot
"""
# Plot
fmt = str(self.kwargs.get("fmt", "k."))
if "errors" in self.kwargs:
errors = self.kwargs["errors"]
if isinstance(errors, dict):
self.subplot.errorbar(self.x, self.y, fmt=fmt,
xerr=errors.get("xerr", None),
yerr=errors.get("yerr", None))
elif isinstance(errors, (collections.Sequence, np.ndarray, float)):
self.subplot.errorbar(self.x, self.y, fmt=fmt, yerr=errors)
else:
self.subplot.plot(self.x, self.y, fmt)
else:
self.subplot.plot(self.x, self.y, fmt)
# bestfit
bestfit = self.kwargs.get("bestfit", None)
if bestfit is not None:
bestfitlim = self.kwargs.get("bestfitlim", None)
if bestfitlim is None:
bestfitlim = self.kwargs.get("xlim", None)
if bestfitlim is None:
bestfitlim = (min(self.x), max(self.x))
fit_args = bestfit.do_bestfit()
bestfit_line = bestfit.get_bestfit_line(
x_min=bestfitlim[0], x_max=bestfitlim[1])
self.subplot.plot(
bestfit_line[0], bestfit_line[1],
self.kwargs.get("bestfitfmt", "k-")
)
self.outputdict["fit_args"] = fit_args
self.outputdict["rmse"] = bestfit.get_rmse()
return self | Create plot |
def get_process_path(tshark_path=None, process_name="tshark"):
"""
Finds the path of the tshark executable. If the user has provided a path
or specified a location in config.ini it will be used. Otherwise default
locations will be searched.
:param tshark_path: Path of the tshark binary
:raises TSharkNotFoundException in case TShark is not found in any location.
"""
config = get_config()
possible_paths = [config.get(process_name, "%s_path" % process_name)]
# Add the user provided path to the search list
if tshark_path is not None:
possible_paths.insert(0, tshark_path)
# Windows search order: configuration file's path, common paths.
if sys.platform.startswith('win'):
for env in ('ProgramFiles(x86)', 'ProgramFiles'):
program_files = os.getenv(env)
if program_files is not None:
possible_paths.append(
os.path.join(program_files, 'Wireshark', '%s.exe' % process_name)
)
# Linux, etc. search order: configuration file's path, the system's path
else:
os_path = os.getenv(
'PATH',
'/usr/bin:/usr/sbin:/usr/lib/tshark:/usr/local/bin'
)
for path in os_path.split(':'):
possible_paths.append(os.path.join(path, process_name))
for path in possible_paths:
if os.path.exists(path):
if sys.platform.startswith('win'):
path = path.replace("\\", "/")
return path
raise TSharkNotFoundException(
'TShark not found. Try adding its location to the configuration file. '
'Searched these paths: {}'.format(possible_paths)
) | Finds the path of the tshark executable. If the user has provided a path
or specified a location in config.ini it will be used. Otherwise default
locations will be searched.
:param tshark_path: Path of the tshark binary
:raises TSharkNotFoundException in case TShark is not found in any location. |
def rank_path(graph, path, edge_ranking=None):
"""Takes in a path (a list of nodes in the graph) and calculates a score
:param pybel.BELGraph graph: A BEL graph
:param list[tuple] path: A list of nodes in the path (includes terminal nodes)
:param dict edge_ranking: A dictionary of {relationship: score}
:return: The score for the edge
:rtype: int
"""
edge_ranking = default_edge_ranking if edge_ranking is None else edge_ranking
return sum(max(edge_ranking[d[RELATION]] for d in graph.edge[u][v].values()) for u, v in pairwise(path)) | Takes in a path (a list of nodes in the graph) and calculates a score
:param pybel.BELGraph graph: A BEL graph
:param list[tuple] path: A list of nodes in the path (includes terminal nodes)
:param dict edge_ranking: A dictionary of {relationship: score}
:return: The score for the edge
:rtype: int |
def _static_folder_path(static_url, static_folder, static_asset):
"""
Returns a path to a file based on the static folder, and not on the
filesystem holding the file.
Returns a path relative to static_url for static_asset
"""
# first get the asset path relative to the static folder.
# static_asset is not simply a filename because it could be
# sub-directory then file etc.
if not static_asset.startswith(static_folder):
raise ValueError("%s static asset must be under %s static folder" %
(static_asset, static_folder))
rel_asset = static_asset[len(static_folder):]
# Now bolt the static url path and the relative asset location together
return '%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/')) | Returns a path to a file based on the static folder, and not on the
filesystem holding the file.
Returns a path relative to static_url for static_asset |
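A behaviour sketch with illustrative paths:

```python
_static_folder_path('/static/', '/app/static', '/app/static/css/site.css')
# -> '/static/css/site.css'
_static_folder_path('/static/', '/app/static', '/elsewhere/site.css')
# raises ValueError: the asset is not under the static folder
```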
async def can(self, identity, permission) -> bool:
"""
Check user permissions.
:return: ``True`` if the identity is allowed the permission, else return ``False``.
"""
assert isinstance(permission, (str, enum.Enum)), permission
assert permission
identify = await self.identity_policy.identify(identity)
# non-registered user still may has some permissions
access = await self.autz_policy.can(identify, permission)
return access | Check user permissions.
:return: ``True`` if the identity is allowed the permission, else return ``False``. |
def enable_broadcasting(self):
"""Begin accumulating broadcast reports received from all devices.
This method will allocate a queue to receive broadcast reports that
will be filled asynchronously as broadcast reports are received.
Returns:
queue.Queue: A queue that will be filled with broadcast reports.
"""
if self._broadcast_reports is not None:
_clear_queue(self._broadcast_reports)
return self._broadcast_reports
self._broadcast_reports = queue.Queue()
return self._broadcast_reports | Begin accumulating broadcast reports received from all devices.
This method will allocate a queue to receive broadcast reports that
will be filled asynchronously as broadcast reports are received.
Returns:
queue.Queue: A queue that will be filled with broadcast reports. |
def get_user_info(self, access_token):
"""Function to get the information about the user using the access code
obtained from the OP
Note:
Refer to the /.well-known/openid-configuration URL of your OP for
the complete list of the claims for different scopes.
Parameters:
* **access_token (string):** access token from the get_tokens_by_code function
Returns:
**dict:** The user data claims that are returned by the OP in format
Example response::
{
"sub": ["248289761001"],
"name": ["Jane Doe"],
"given_name": ["Jane"],
"family_name": ["Doe"],
"preferred_username": ["j.doe"],
"email": ["janedoe@example.com"],
"picture": ["http://example.com/janedoe/me.jpg"]
}
Raises:
**OxdServerError:** If the param access_token is empty OR if the oxd
Server returns an error.
"""
params = dict(oxd_id=self.oxd_id, access_token=access_token)
logger.debug("Sending command `get_user_info` with params %s",
params)
response = self.msgr.request("get_user_info", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
return response['data']['claims'] | Function to get the information about the user using the access code
obtained from the OP
Note:
Refer to the /.well-known/openid-configuration URL of your OP for
the complete list of the claims for different scopes.
Parameters:
* **access_token (string):** access token from the get_tokens_by_code function
Returns:
**dict:** The user data claims that are returned by the OP in format
Example response::
{
"sub": ["248289761001"],
"name": ["Jane Doe"],
"given_name": ["Jane"],
"family_name": ["Doe"],
"preferred_username": ["j.doe"],
"email": ["janedoe@example.com"],
"picture": ["http://example.com/janedoe/me.jpg"]
}
Raises:
**OxdServerError:** If the param access_token is empty OR if the oxd
Server returns an error. |
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
self._curr_module.set_states(states, value) | Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays. |
def get_or_set_default(self, section, option, value):
"""
Base method to fetch values and to set defaults in case they
don't exist.
"""
try:
ret = self.get(section, option)
except MissingSetting:
self.set(section, option, value)
ret = value
return ret | Base method to fetch values and to set defaults in case they
don't exist. |
def update(self, forecasts, observations):
"""
Update the statistics with forecasts and observations.
Args:
forecasts: The discrete cumulative distribution functions of the forecasts.
observations: The observed values, either as a 1D array of raw observations or as pre-computed observation CDFs.
"""
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs_cdfs = observations
self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
self.errors["O"] += np.sum(obs_cdfs, axis=0)
self.num_forecasts += forecasts.shape[0] | Update the statistics with forecasts and observations.
Args:
forecasts: The discrete cumulative distribution functions of the forecasts.
observations: The observed values, either as a 1D array of raw observations or as pre-computed observation CDFs. |
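A sketch of the observation-to-CDF step above: for a scalar observation, the empirical CDF is a step function over the evaluation thresholds (the threshold values here are illustrative):
import numpy as np

thresholds = np.array([0.0, 1.0, 2.0, 5.0, 10.0])
observation = 2.5
obs_cdf = np.zeros(thresholds.size)
obs_cdf[thresholds >= observation] = 1  # CDF jumps to 1 at the observation
print(obs_cdf)  # [0. 0. 0. 1. 1.]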
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name) | Find and return a magic of the given type by name.
Returns None if the magic isn't found. |
def create(self, parameters):
"""
Create a new item (if supported)
:param parameters: dict
:return: dict|str
"""
response = self._client.session.post(
'{url}/new'.format(url=self.endpoint_url), data=parameters
)
return self.process_response(response) | Create a new item (if supported)
:param parameters: dict
:return: dict|str |
def __applytns(self, root):
"""Make sure included schema has the same target namespace."""
TNS = "targetNamespace"
tns = root.get(TNS)
if tns is None:
tns = self.schema.tns[1]
root.set(TNS, tns)
else:
if self.schema.tns[1] != tns:
raise Exception, "%s mismatch" % TNS | Make sure included schema has the same target namespace. |
def _node_type(st):
""" return a string indicating the type of special node represented by
the stat buffer st (block, character, fifo, socket).
"""
_types = [
(stat.S_ISBLK, "block device"),
(stat.S_ISCHR, "character device"),
(stat.S_ISFIFO, "named pipe"),
(stat.S_ISSOCK, "socket")
]
for t in _types:
if t[0](st.st_mode):
return t[1] | return a string indicating the type of special node represented by
the stat buffer st (block, character, fifo, socket). |
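A runnable check of the same stat predicates; on typical Unix systems /dev/null is a character device:
import os
import stat

_types = [
    (stat.S_ISBLK, "block device"),
    (stat.S_ISCHR, "character device"),
    (stat.S_ISFIFO, "named pipe"),
    (stat.S_ISSOCK, "socket"),
]

st = os.stat("/dev/null")
# Prints "character device" on most Unix systems, None for regular files.
print(next((name for check, name in _types if check(st.st_mode)), None))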
def _add_onchain_locksroot_to_snapshot(
raiden: RaidenService,
storage: SQLiteStorage,
snapshot_record: StateChangeRecord,
) -> Tuple[str, Any]:
"""
Add `onchain_locksroot` to each NettingChannelEndState
"""
snapshot = json.loads(snapshot_record.data)
for payment_network in snapshot.get('identifiers_to_paymentnetworks', dict()).values():
for token_network in payment_network.get('tokennetworks', list()):
channelidentifiers_to_channels = token_network.get(
'channelidentifiers_to_channels',
dict(),
)
for channel in channelidentifiers_to_channels.values():
our_locksroot, partner_locksroot = _get_onchain_locksroots(
raiden=raiden,
storage=storage,
token_network=token_network,
channel=channel,
)
channel['our_state']['onchain_locksroot'] = serialize_bytes(our_locksroot)
channel['partner_state']['onchain_locksroot'] = serialize_bytes(partner_locksroot)
return json.dumps(snapshot, indent=4), snapshot_record.identifier | Add `onchain_locksroot` to each NettingChannelEndState |
def date_range_for_webtrends(cls, start_at=None, end_at=None):
"""
Get the day dates between start and end, formatted for the query.
The range is inclusive, e.g. the final day covers (end_at, end_at + 1 day).
"""
if start_at and end_at:
start_date = cls.parse_standard_date_string_to_date(
start_at)
end_date = cls.parse_standard_date_string_to_date(
end_at)
numdays = (end_date - start_date).days + 1
start_dates = [end_date - timedelta(days=x)
for x in reversed(range(0, numdays))]
date_range = []
for i, date in enumerate(start_dates):
query_date = cls.parse_date_for_query(date)
date_range.append((query_date, query_date))
return date_range
else:
return [("current_day-1", "current_day-1")] | Get the day dates in between start and end formatted for query.
This returns dates inclusive e.g. final day is (end_at, end_at+1 day) |
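A sketch of the inclusive day expansion, with ISO date strings standing in for parse_date_for_query (the formatting is illustrative):
from datetime import date, timedelta

start_date, end_date = date(2013, 4, 1), date(2013, 4, 3)
numdays = (end_date - start_date).days + 1
days = [end_date - timedelta(days=x) for x in reversed(range(numdays))]
print([(d.isoformat(), d.isoformat()) for d in days])
# [('2013-04-01', '2013-04-01'), ('2013-04-02', '2013-04-02'), ('2013-04-03', '2013-04-03')]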
def put(self, key, value):
"""
>>> c = MemSizeLRUCache(maxmem=24*4)
>>> c.put(1, 1)
>>> c.mem() # 24-bytes per integer
24
>>> c.put(2, 2)
>>> c.put(3, 3)
>>> c.put(4, 4)
>>> c.get(1)
1
>>> c.mem()
96
>>> c.size()
4
>>> c.put(5, 5)
>>> c.size()
4
>>> c.get(2)
Traceback (most recent call last):
...
KeyError: 2
"""
mem = sys.getsizeof(value)
if self._mem + mem > self._maxmem:
self.delete(self.last())
LRUCache.put(self, key, (value, mem))
self._mem += mem | >>> c = MemSizeLRUCache(maxmem=24*4)
>>> c.put(1, 1)
>>> c.mem() # 24-bytes per integer
24
>>> c.put(2, 2)
>>> c.put(3, 3)
>>> c.put(4, 4)
>>> c.get(1)
1
>>> c.mem()
96
>>> c.size()
4
>>> c.put(5, 5)
>>> c.size()
4
>>> c.get(2)
Traceback (most recent call last):
...
KeyError: 2 |
def json_query(data, expr):
'''
Query data using JMESPath language (http://jmespath.org).
'''
if jmespath is None:
err = 'json_query requires jmespath module installed'
log.error(err)
raise RuntimeError(err)
return jmespath.search(expr, data) | Query data using JMESPath language (http://jmespath.org). |
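A small JMESPath query against an in-memory structure (requires the jmespath package; the data is illustrative):
import jmespath

data = {"servers": [{"name": "a", "state": "up"}, {"name": "b", "state": "down"}]}
# Select the names of servers whose state is "up".
print(jmespath.search("servers[?state=='up'].name", data))  # ['a']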
def intrusion_sets(self, name, owner=None, **kwargs):
"""
Create the Intrusion Set TI object.
Args:
owner: The owner of the Intrusion Set.
name: The name of the Intrusion Set.
**kwargs: Additional keyword arguments passed to IntrusionSet.
Return:
IntrusionSet: The Intrusion Set TI object.
"""
return IntrusionSet(self.tcex, name, owner=owner, **kwargs) | Create the Intrusion Set TI object.
Args:
owner: The owner of the Intrusion Set.
name: The name of the Intrusion Set.
**kwargs: Additional keyword arguments passed to IntrusionSet.
Return:
IntrusionSet: The Intrusion Set TI object. |
def exists(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Check if path exists
path
path to check
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
List of Zookeeper hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.exists /test/name profile=prod
'''
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return bool(conn.exists(path)) | Check if path exists
path
path to check
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
List of Zookeeper hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.exists /test/name profile=prod |
def convert_dedent(self):
"""Convert a dedent into an indent"""
# Dedent means go back to last indentation
if self.indent_amounts:
self.indent_amounts.pop()
# Change the token
tokenum = INDENT
# Get last indent amount
last_indent = 0
if self.indent_amounts:
last_indent = self.indent_amounts[-1]
# Make sure we don't have multiple indents in a row
while self.result[-1][0] == INDENT:
self.result.pop()
value = self.indent_type * last_indent
return tokenum, value | Convert a dedent into an indent |
def fetch(cls, id, api_key=None, endpoint=None, add_headers=None,
**kwargs):
"""
Fetch a single entity from the API endpoint.
Used when you know the exact ID that must be queried.
"""
if endpoint is None:
endpoint = cls.get_endpoint()
inst = cls(api_key=api_key)
parse_key = cls.sanitize_ep(endpoint).split("/")[-1]
endpoint = '/'.join((endpoint, id))
data = cls._parse(inst.request('GET',
endpoint=endpoint,
add_headers=add_headers,
query_params=kwargs),
key=parse_key)
inst._set(data)
return inst | Fetch a single entity from the API endpoint.
Used when you know the exact ID that must be queried. |
def single_row_or_col_df_to_dict(desired_type: Type[T], single_rowcol_df: pd.DataFrame, logger: Logger, **kwargs)\
-> Dict[str, str]:
"""
Helper method to convert a dataframe with a single row, a single column, or two columns into a dictionary
:param desired_type:
:param single_rowcol_df:
:param logger:
:param kwargs:
:return:
"""
if single_rowcol_df.shape[0] == 1:
return single_rowcol_df.transpose()[0].to_dict()
# return {col_name: single_rowcol_df[col_name][single_rowcol_df.index.values[0]] for col_name in single_rowcol_df.columns}
elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex):
# two columns but the index contains nothing but the row number : we can use the first column
d = single_rowcol_df.set_index(single_rowcol_df.columns[0])
return d[d.columns[0]].to_dict()
elif single_rowcol_df.shape[1] == 1:
# one column and one index
d = single_rowcol_df
return d[d.columns[0]].to_dict()
else:
raise ValueError('Unable to convert provided dataframe to a parameters dictionary: '
'expected exactly 1 row, 1 column, or 2 columns, found: ' + str(single_rowcol_df.shape)) | Helper method to convert a dataframe with a single row, a single column, or two columns into a dictionary
:param desired_type:
:param single_rowcol_df:
:param logger:
:param kwargs:
:return: |
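The three accepted shapes, reproduced with plain pandas (the frames are illustrative):
import pandas as pd

# Single row: column names become the keys.
row_df = pd.DataFrame([[1, 2]], columns=["a", "b"])
print(row_df.transpose()[0].to_dict())  # {'a': 1, 'b': 2}

# Two columns with a default RangeIndex: the first column becomes the keys.
two_col = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
d = two_col.set_index(two_col.columns[0])
print(d[d.columns[0]].to_dict())  # {'a': 1, 'b': 2}

# Single column: the existing index becomes the keys.
one_col = pd.DataFrame({"value": [1, 2]}, index=["a", "b"])
print(one_col[one_col.columns[0]].to_dict())  # {'a': 1, 'b': 2}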
def _stmt_list(self, stmts, indent=True):
"""return a list of nodes to string"""
stmts = "\n".join(nstr for nstr in [n.accept(self) for n in stmts] if nstr)
if indent:
return self.indent + stmts.replace("\n", "\n" + self.indent)
return stmts | return a list of nodes to string |
def _render_content_list(self, content, depth, dstack, **settings):
"""
Render the list.
"""
result = []
i = 0
size = len(content)
for value in content:
ds = [(depth, i, size)]
ds = dstack + ds
if isinstance(value, dict):
result.append(self._render_item(ds, "[{}]".format(i), **settings))
result += self._render_content_dict(value, depth + 1, ds, **settings)
elif isinstance(value, list):
result.append(self._render_item(ds, "[{}]".format(i), **settings))
result += self._render_content_list(value, depth + 1, ds, **settings)
else:
result.append(self._render_item(ds, value, **settings))
i += 1
return result | Render the list. |
def dt_str_to_posix(dt_str):
"""format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
"""
parsable, _ = dt_str.split('.')
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
return calendar.timegm(dt.utctimetuple()) | format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC. |
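The same parse-and-convert steps as a runnable snippet, assuming _DT_FORMAT is the ISO 8601 layout implied by the docstring:
import calendar
import datetime

_DT_FORMAT = "%Y-%m-%dT%H:%M:%S"  # assumed value of the module constant
dt_str = "2013-04-12T00:22:27.978Z"
parsable, _ = dt_str.split(".")  # drop fractional seconds and the Z suffix
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
print(calendar.timegm(dt.utctimetuple()))  # 1365726147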
def getCoords(self):
'''
Gets the coords of the View
@return: A tuple containing the View's coordinates ((L, T), (R, B))
'''
if DEBUG_COORDS:
print >>sys.stderr, "getCoords(%s %s ## %s)" % (self.getClass(), self.getId(), self.getUniqueId())
(x, y) = self.getXY()
w = self.getWidth()
h = self.getHeight()
return ((x, y), (x+w, y+h)) | Gets the coords of the View
@return: A tuple containing the View's coordinates ((L, T), (R, B)) |
def cmd_queue_peaks(self):
"""Generate a list of the requests peaks on the queue.
A queue peak is defined by the biggest value on the backend queue
on a series of log lines that are between log lines without being
queued.
.. warning::
The peak-ignore threshold is not yet configurable; it is currently hard-coded to 1.
"""
threshold = 1
peaks = []
current_peak = 0
current_queue = 0
current_span = 0
first_on_queue = None
for line in self._valid_lines:
current_queue = line.queue_backend
if current_queue > 0:
current_span += 1
if first_on_queue is None:
first_on_queue = line.accept_date
if current_queue == 0 and current_peak > threshold:
data = {
'peak': current_peak,
'span': current_span,
'first': first_on_queue,
'last': line.accept_date,
}
peaks.append(data)
current_peak = 0
current_span = 0
first_on_queue = None
if current_queue > current_peak:
current_peak = current_queue
# case of a series that does not end
if current_queue > 0 and current_peak > threshold:
data = {
'peak': current_peak,
'span': current_span,
'first': first_on_queue,
'last': line.accept_date,
}
peaks.append(data)
return peaks | Generate a list of the requests peaks on the queue.
A queue peak is defined by the biggest value on the backend queue
on a series of log lines that are between log lines without being
queued.
.. warning::
The peak-ignore threshold is not yet configurable; it is currently hard-coded to 1. |
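A toy run of the peak-detection logic over a list of backend-queue samples (the samples are illustrative; with threshold 1, only runs whose maximum exceeds 1 are reported):
samples = [0, 2, 5, 3, 0, 1, 0, 0, 4, 6]
threshold = 1
peaks, current_peak, current_span = [], 0, 0
for q in samples:
    if q > 0:
        current_span += 1
    if q == 0 and current_peak > threshold:
        peaks.append({"peak": current_peak, "span": current_span})
        current_peak = current_span = 0
    if q > current_peak:
        current_peak = q
# Close a series that does not end with an empty queue.
if samples[-1] > 0 and current_peak > threshold:
    peaks.append({"peak": current_peak, "span": current_span})
print(peaks)  # [{'peak': 5, 'span': 3}, {'peak': 6, 'span': 3}]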
def get_spider_stats(self):
'''
Gather spider based stats
'''
self.logger.debug("Gathering spider stats")
the_dict = {}
spider_set = set()
total_spider_count = 0
keys = self.redis_conn.keys('stats:crawler:*:*:*')
for key in keys:
# we only care about the spider
elements = key.split(":")
spider = elements[3]
if spider not in the_dict:
the_dict[spider] = {}
the_dict[spider]['count'] = 0
if len(elements) == 6:
# got a time based stat
response = elements[4]
end = elements[5]
if response not in the_dict[spider]:
the_dict[spider][response] = {}
the_dict[spider][response][end] = self._get_key_value(key, end == 'lifetime')
elif len(elements) == 5:
# got a spider identifier
the_dict[spider]['count'] += 1
total_spider_count += 1
spider_set.add(spider)
else:
self.logger.warn("Unknown crawler stat key", {"key": key})
# simple counts
the_dict['unique_spider_count'] = len(spider_set)
the_dict['total_spider_count'] = total_spider_count
ret_dict = {}
ret_dict['spiders'] = the_dict
return ret_dict | Gather spider based stats |
def _get_id2gos(self, associations, **kws):
"""Return given associations in a dict, id2gos"""
options = AnnoOptions(self.evobj, **kws)
# Default reduction is to remove. For all options, see goatools/anno/opts.py:
# * Evidence_Code == ND -> No biological data No biological Data available
# * Qualifiers contain NOT
assc = self.reduce_annotations(associations, options)
return self._get_dbid2goids(assc) if options.b_geneid2gos else self._get_goid2dbids(assc) | Return given associations in a dict, id2gos |
def do_build(self):
"""
We need this hack, else 'self' would be replaced by __iter__.next().
"""
tmp = self.explicit
self.explicit = True
b = super(KeyShareEntry, self).do_build()
self.explicit = tmp
return b | We need this hack, else 'self' would be replaced by __iter__.next(). |
def get_elements(self, json_string, expr):
"""
Get list of elements from _json_string_, matching [http://goessner.net/articles/JsonPath/|JSONPath] expression.
*Args:*\n
_json_string_ - JSON string;\n
_expr_ - JSONPath expression;
*Returns:*\n
List of found elements or ``None`` if no elements were found
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author |
=>\n
| [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
"""
load_input_json = self.string_to_json(json_string)
# parsing jsonpath
jsonpath_expr = parse(expr)
# list of returned elements
value_list = []
for match in jsonpath_expr.find(load_input_json):
value_list.append(match.value)
if not value_list:
return None
else:
return value_list | Get list of elements from _json_string_, matching [http://goessner.net/articles/JsonPath/|JSONPath] expression.
*Args:*\n
_json_string_ - JSON string;\n
_expr_ - JSONPath expression;
*Returns:*\n
List of found elements or ``None`` if no elements were found
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author |
=>\n
| [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien'] |
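The underlying JSONPath lookup, assuming `parse` comes from the jsonpath-rw package as in the keyword above (the JSON is a trimmed version of the documented example):
import json
from jsonpath_rw import parse

json_string = '{"store": {"book": [{"author": "Nigel Rees"}, {"author": "Evelyn Waugh"}]}}'
data = json.loads(json_string)
values = [match.value for match in parse("$.store.book[*].author").find(data)]
print(values)  # ['Nigel Rees', 'Evelyn Waugh']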
def order_by_json_path(self, json_path, language_code=None, order='asc'):
"""
Orders a queryset by the value of the specified `json_path`.
More about the `#>>` operator and the `json_path` arg syntax:
https://www.postgresql.org/docs/current/static/functions-json.html
More about Raw SQL expressions:
https://docs.djangoproject.com/en/dev/ref/models/expressions/#raw-sql-expressions
Usage example:
MyModel.objects.language('en_us').filter(is_active=True).order_by_json_path('title')
"""
language_code = (language_code
or self._language_code
or self.get_language_key(language_code))
json_path = '{%s,%s}' % (language_code, json_path)
# Our jsonb field is named `translations`.
raw_sql_expression = RawSQL("translations#>>%s", (json_path,))
if order == 'desc':
raw_sql_expression = raw_sql_expression.desc()
return self.order_by(raw_sql_expression) | Orders a queryset by the value of the specified `json_path`.
More about the `#>>` operator and the `json_path` arg syntax:
https://www.postgresql.org/docs/current/static/functions-json.html
More about Raw SQL expressions:
https://docs.djangoproject.com/en/dev/ref/models/expressions/#raw-sql-expressions
Usage example:
MyModel.objects.language('en_us').filter(is_active=True).order_by_json_path('title') |
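Postgres' `#>>` operator extracts text at a path inside jsonb; a pure-Python analogue of the '{<language_code>,<json_path>}' lookup built above (the data and helper are illustrative):
translations = {"en_us": {"title": "Hello"}, "fr": {"title": "Bonjour"}}

def json_path_text(doc, path):
    # Walk a list of keys, mirroring `translations #>> '{en_us,title}'`.
    for key in path:
        if not isinstance(doc, dict) or key not in doc:
            return None
        doc = doc[key]
    return str(doc)

print(json_path_text(translations, ["en_us", "title"]))  # Hello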
def renderThumbnail(self, relpath=""):
"""renderThumbnail() is called to render a thumbnail of the DP (e.g. in Data Product tables)."""
# no thumbnail -- return empty string
if self.thumbnail is None:
return ""
# else thumbnail is same as full image (because image was small enough), insert directly
elif self.thumbnail is self.fullimage:
fname = relpath + self.fullimage
return """<IMG SRC="%s" ALT="%s"></IMG>""" % (quote_url(fname), quote_url(os.path.basename(self.filename)))
# else return thumbnail linking to full image
else:
tname = relpath + self.thumbnail
fname = relpath + self.fullimage
return """<A HREF="%s"><IMG SRC="%s" ALT="%s"></A>""" % (
quote_url(fname), quote_url(tname), quote_url(os.path.basename(self.filename))) | renderThumbnail() is called to render a thumbnail of the DP (e.g. in Data Product tables). |
def remote_command(function, self, *args, **kwargs):
'''
Catch `RuntimeError` exceptions raised by remote control board firmware
commands and re-raise as more specific `FirmwareError` exception type,
which includes command code and return code.
'''
try:
return function(self, *args, **kwargs)
except RuntimeError, exception:
error_message = str(exception)
match = CRE_REMOTE_ERROR.match(error_message)
if match:
# Exception message matches format of remote firmware error.
command_code = int(match.group('command_int'))
return_code = int(match.group('return_code_int'))
raise FirmwareError(command_code, return_code)
match = CRE_REMOTE_COMMAND_ERROR.match(error_message)
if match:
# Exception message matches format of remote firmware error.
command_code = int(match.group('command_int'))
command_name = NAMES_BY_COMMAND_CODE[command_code]
raise RuntimeError(CRE_REMOTE_COMMAND_ERROR.sub(command_name,
error_message))
# Not a remote firmware error, so raise original exception.
raise | Catch `RuntimeError` exceptions raised by remote control board firmware
commands and re-raise as more specific `FirmwareError` exception type,
which includes command code and return code. |
def _eval_target_brutal(state, ip, limit):
"""
The traditional way of evaluating symbolic jump targets.
:param state: A SimState instance.
:param ip: The AST of the instruction pointer to evaluate.
:param limit: The maximum number of concrete IPs.
:return: A list of conditions and the corresponding concrete IPs.
:rtype: list
"""
addrs = state.solver.eval_upto(ip, limit)
return [ (ip == addr, addr) for addr in addrs ] | The traditional way of evaluating symbolic jump targets.
:param state: A SimState instance.
:param ip: The AST of the instruction pointer to evaluate.
:param limit: The maximum number of concrete IPs.
:return: A list of conditions and the corresponding concrete IPs.
:rtype: list |
def pop(self, key, default=NONE):
"""
If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised.
"""
if key in self:
self._list_remove(key)
return self._pop(key)
else:
if default is NONE:
raise KeyError(key)
else:
return default | If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised. |
def _default_transform_fn(self, model, content, content_type, accept):
"""Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type)
"""
try:
data = self._input_fn(content, content_type)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)
prediction = self._predict_fn(data, model)
try:
result = self._output_fn(prediction, accept)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.NOT_ACCEPTABLE)
return result | Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type) |
def render_name(self, template_name, *context, **kwargs):
"""
Render the template with the given name using the given context.
See the render() docstring for more information.
"""
loader = self._make_loader()
template = loader.load_name(template_name)
return self._render_string(template, *context, **kwargs) | Render the template with the given name using the given context.
See the render() docstring for more information. |
def eol_distance_last(self, offset=0):
"""Return the ammount of characters until the last newline."""
distance = 0
for char in reversed(self.string[:self.pos + offset]):
if char == '\n':
break
else:
distance += 1
return distance | Return the amount of characters until the last newline. |
def _parse_package(cls, package_string):
"""
Helper method for parsing package string.
Args:
package_string (str): dash separated package string such as 'bash-4.2.39-3.el7'
Returns:
dict: dictionary containing 'name', 'epoch', 'version', 'release' and 'arch' keys
"""
pkg, arch = rsplit(package_string, cls._arch_sep(package_string))
if arch not in KNOWN_ARCHITECTURES:
pkg, arch = (package_string, None)
pkg, release = rsplit(pkg, '-')
name, version = rsplit(pkg, '-')
epoch, version = version.split(':', 1) if ":" in version else ['0', version]
# oracleasm packages have a dash in their version string, fix that
if name.startswith('oracleasm') and name.endswith('.el5'):
name, version2 = name.split('-', 1)
version = version2 + '-' + version
return {
'name': name,
'version': version,
'release': release,
'arch': arch,
'epoch': epoch
} | Helper method for parsing package string.
Args:
package_string (str): dash separated package string such as 'bash-4.2.39-3.el7'
Returns:
dict: dictionary containing 'name', 'epoch', 'version', 'release' and 'arch' keys |
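The same right-to-left splits with plain str.rsplit, for an input without an architecture suffix (illustrative; the real helper also strips the arch first):
package_string = "bash-4.2.39-3.el7"
pkg, release = package_string.rsplit("-", 1)
name, version = pkg.rsplit("-", 1)
epoch, version = version.split(":", 1) if ":" in version else ("0", version)
print({"name": name, "epoch": epoch, "version": version, "release": release})
# {'name': 'bash', 'epoch': '0', 'version': '4.2.39', 'release': '3.el7'}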
def to_str(obj):
"""
convert an object to a string
"""
if isinstance(obj, str):
return obj
if isinstance(obj, unicode):
return obj.encode('utf-8')
return str(obj) | convert an object to a string |
def get_site(self, *args):
""" Returns a sharepoint site
:param args: It accepts multiple ways of retrieving a site:
get_site(host_name): the host_name, e.g.
'contoso.sharepoint.com' or 'root'
get_site(site_id): the site_id: a comma separated string of
(host_name, site_collection_id, site_id)
get_site(host_name, path_to_site): host_name e.g. 'contoso.
sharepoint.com', path_to_site: a url path (with a leading slash)
get_site(host_name, site_collection_id, site_id):
host_name e.g. 'contoso.sharepoint.com'
:rtype: Site
"""
num_args = len(args)
if num_args == 1:
site = args[0]
elif num_args == 2:
host_name, path_to_site = args
path_to_site = '/' + path_to_site if not path_to_site.startswith(
'/') else path_to_site
site = '{}:{}:'.format(host_name, path_to_site)
elif num_args == 3:
site = ','.join(args)
else:
raise ValueError('Incorrect number of arguments')
url = self.build_url(self._endpoints.get('get_site').format(id=site))
response = self.con.get(url)
if not response:
return None
data = response.json()
return self.site_constructor(parent=self,
**{self._cloud_data_key: data}) | Returns a sharepoint site
:param args: It accepts multiple ways of retrieving a site:
get_site(host_name): the host_name, e.g.
'contoso.sharepoint.com' or 'root'
get_site(site_id): the site_id: a comma separated string of
(host_name, site_collection_id, site_id)
get_site(host_name, path_to_site): host_name e.g. 'contoso.
sharepoint.com', path_to_site: a url path (with a leading slash)
get_site(host_name, site_collection_id, site_id):
host_name e.g. 'contoso.sharepoint.com'
:rtype: Site |
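How the three call forms collapse into a single site identifier before the URL is built; a pure string-handling sketch with no network calls (the function name is illustrative):
def site_identifier(*args):
    if len(args) == 1:
        return args[0]
    if len(args) == 2:
        host_name, path_to_site = args
        if not path_to_site.startswith("/"):
            path_to_site = "/" + path_to_site
        return "{}:{}:".format(host_name, path_to_site)
    if len(args) == 3:
        return ",".join(args)
    raise ValueError("Incorrect number of arguments")

print(site_identifier("contoso.sharepoint.com", "teams/hr"))
# contoso.sharepoint.com:/teams/hr: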