code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
async def _async_request_soup(url):
    """Perform an HTTP GET request and return a BeautifulSoup parser.

    :param url: URL to fetch.
    :return: ``bs4.BeautifulSoup`` built from the response body using the
        ``html.parser`` backend.
    """
    from bs4 import BeautifulSoup
    import aiohttp
    _LOGGER.debug('GET %s', url)
    async with aiohttp.ClientSession() as session:
        # Use the response as a context manager so the underlying
        # connection is released even if reading the body raises.
        async with session.get(url) as resp:
            text = await resp.text()
    return BeautifulSoup(text, 'html.parser')
def add_data_point(self, x, y, number_format=None):
    """Append a new ``XyDataPoint`` built from *x* and *y* and return it.

    :param number_format: optional number format applied to the point.
    """
    new_point = XyDataPoint(self, x, y, number_format)
    self.append(new_point)
    return new_point
def name(cls):
    """Return the preferred name under which this command is known.

    Derived from the class name: underscores become hyphens, the result
    is lower-cased, and a leading ``cmd-`` prefix is dropped.
    """
    preferred = cls.__name__.lower().replace("_", "-")
    if preferred.startswith("cmd-"):
        preferred = preferred[len("cmd-"):]
    return preferred
def decode(self, packet):
    '''
    Decode a UNSUBACK control packet.

    Populates ``self.encoded``, ``self.msgId`` and ``self.topics`` from
    the raw packet bytes.
    '''
    self.encoded = packet
    # Skip the fixed header: byte 0 plus the variable-length
    # "remaining length" field (continuation bit 0x80).
    num_len_bytes = 1
    while packet[num_len_bytes] & 0x80:
        num_len_bytes += 1
    remaining = packet[num_len_bytes + 1:]
    self.msgId = decode16Int(remaining[0:2])
    self.topics = []
    remaining = remaining[2:]
    # Each entry is a 16-bit big-endian length followed by a UTF-8 topic.
    while remaining:
        topic_len = decode16Int(remaining[0:2])
        self.topics.append(remaining[2:2 + topic_len].decode(encoding='utf-8'))
        remaining = remaining[2 + topic_len:]
def fromfile(fname):
    """Open SVG figure from file.

    Parameters
    ----------
    fname : str
        name of the SVG file

    Returns
    -------
    SVGFigure
        newly created :py:class:`SVGFigure` initialised with the file content
    """
    figure = SVGFigure()
    with open(fname) as handle:
        figure.root = etree.parse(handle).getroot()
    return figure
def iter_used_addresses(
        adapter,  # type: BaseAdapter
        seed,  # type: Seed
        start,  # type: int
        security_level=None,  # type: Optional[int]
):
    # type: (...) -> Generator[Tuple[Address, List[TransactionHash]], None, None]
    """
    Scans the Tangle for used addresses.

    Yields ``(address, transaction_hashes)`` pairs until the first address
    with no transactions is reached.  This is basically the opposite of
    invoking ``getNewAddresses`` with ``stop=None``.
    """
    if security_level is None:
        security_level = AddressGenerator.DEFAULT_SECURITY_LEVEL

    find_transactions = FindTransactionsCommand(adapter)
    address_iter = AddressGenerator(seed, security_level).create_iterator(start)

    for address in address_iter:
        response = find_transactions(addresses=[address])
        hashes = response['hashes']
        if not hashes:
            # First unused address found: stop scanning.
            break
        yield address, hashes
        # Reset the command so that we can call it again.
        find_transactions.reset()
def instantiate_labels(instructions):
    """
    Assign concrete labels to every label placeholder in *instructions*.

    :return: list of instructions with all label placeholders assigned to
        real labels.
    """
    counter = 1
    mapping = dict()
    output = []
    for instruction in instructions:
        if isinstance(instruction, Jump) and isinstance(instruction.target, LabelPlaceholder):
            target, mapping, counter = _get_label(instruction.target, mapping, counter)
            output.append(Jump(target))
        elif isinstance(instruction, JumpConditional) and isinstance(instruction.target, LabelPlaceholder):
            target, mapping, counter = _get_label(instruction.target, mapping, counter)
            # Instantiate the same JumpConditional subclass as the original.
            output.append(type(instruction)(target, instruction.condition))
        elif isinstance(instruction, JumpTarget) and isinstance(instruction.label, LabelPlaceholder):
            label, mapping, counter = _get_label(instruction.label, mapping, counter)
            output.append(JumpTarget(label))
        else:
            output.append(instruction)
    return output
def captures(self, uuid, withTitles=False):
    """Return the captures for a given uuid.

    :param withTitles: when True, titles are requested as well
        (sent to the API as ``withTitles=yes``).
    """
    def picker(response):
        return response.get('capture', [])

    titles_flag = 'yes' if withTitles else 'no'
    return self._get((uuid,), picker, withTitles=titles_flag)
def get_version(filename, version='1.00'):
    ''' Read the package version as text to avoid machinations at import time.

    :param filename: path of the file containing a ``__version__`` line.
    :param version: fallback returned when no version can be parsed.
    :return: the version string, or *version* if not found.
    '''
    import re
    with open(filename) as infile:
        for line in infile:
            if line.startswith('__version__'):
                # Accept either quote style: __version__ = '1.2' or "1.2"
                # (the original only handled single quotes).
                match = re.search(r"""__version__\s*=\s*['"]([^'"]*)['"]""", line)
                if match:
                    version = match.group(1)
                break
    return version
def delete(self, config_file=None):
    """Deletes the credentials file specified in `config_file`. If no
    file is specified, it deletes the default user credential file.

    Args:
        config_file (str): Path to configuration file. Defaults to delete
            the user default location if `None`.

    .. Tip::

        To see if there is a default user credential file stored, do the
        following::

            >>> creds = Credentials()
            >>> print(creds)
            Credentials(username=eschbacher, key=abcdefg,
            base_url=https://eschbacher.carto.com/)
    """
    target = config_file if config_file else _DEFAULT_PATH
    try:
        os.remove(target)
    except OSError as err:
        # Nothing stored at that location: warn instead of failing.
        warnings.warn('No credential file found at {}.'.format(target))
    else:
        print('Credentials at {} successfully removed.'.format(target))
def visit_starred(self, node, parent):
    """Visit a Starred node and return a new instance of it."""
    starred = nodes.Starred(
        ctx=self._get_context(node),
        lineno=node.lineno,
        col_offset=node.col_offset,
        parent=parent,
    )
    starred.postinit(self.visit(node.value, starred))
    return starred
def get_features_from_equation_file(filename):
    """
    Returns list of feature names read from equation file given
    by ``filename``.

    Format: one feature per line; comments start with ``#``;
    empty lines are ignored.

    Example::

        #this is a comment
        basefeature
        #empty lines are ignored
        myfeature
        anotherfeature

    :param filename: path of the equation file to read.
    :return: list of feature name strings, in file order.
    """
    features = []
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle until garbage collection).
    with open(filename) as equation_file:
        for line in equation_file:
            # Drop trailing comments, then surrounding whitespace.
            name = line.split('#')[0].strip()
            if name:
                features.append(name)
    return features
def assign_indent_numbers(lst, inum, dic=None):
    """ Associate keywords with their respective indentation numbers.

    :param lst: iterable of keywords.
    :param inum: indentation number to assign to every keyword.
    :param dic: optional mapping to update; a fresh ``defaultdict(int)`` is
        created when omitted.  (The original used a mutable default
        argument, so every call silently shared one dictionary.)
    :return: the updated mapping.
    """
    if dic is None:
        dic = collections.defaultdict(int)
    for keyword in lst:
        dic[keyword] = inum
    return dic
def plain(self, markup):
    """ Strips Wikipedia markup from given text.
    This creates a "plain" version of the markup,
    stripping images and references and the like.
    Does some commonsense maintenance as well,
    like collapsing multiple spaces.
    If you specified full_strip=False for WikipediaPage instance,
    some markup is preserved as HTML (links, bold, italic).
    """
    # NOTE: ordering below matters — each pass assumes earlier passes ran.
    # Strip bold and italic.
    if self.full_strip:
        markup = markup.replace("'''", "")
        markup = markup.replace("''", "")
    else:
        markup = re.sub("'''([^']*?)'''", "<b>\\1</b>", markup)
        markup = re.sub("''([^']*?)''", "<i>\\1</i>", markup)
    # Strip image gallery sections.
    markup = re.sub(self.re["gallery"], "", markup)
    # Strip tables.
    markup = re.sub(self.re["table"], "", markup)
    markup = markup.replace("||", "")
    markup = markup.replace("|}", "")
    # Strip links, keeping the display alias.
    # We'll strip the ending ]] later.
    if self.full_strip:
        markup = re.sub(r"\[\[[^\]]*?\|", "", markup)
    else:
        markup = re.sub(r"\[\[([^]|]*|)\]\]", '<a href="\\1">\\1</a>', markup)
        markup = re.sub(r"\[\[([^]|]*|)\|([^]]*)\]\]", '<a href="\\1">\\2</a>', markup)
    # Strip translations, users, etc.
    markup = re.sub(self.re["translation"], "", markup)
    # This math TeX is not supported:
    # NOTE(review): "\displaytyle" looks like a typo for "\displaystyle" —
    # as written it only removes the misspelled token; confirm intent.
    markup = markup.replace("\displaytyle", "")
    markup = markup.replace("\textstyle", "")
    markup = markup.replace("\scriptstyle", "")
    markup = markup.replace("\scriptscriptstyle", "")
    # Before stripping [ and ] brackets,
    # make sure they are retained inside <math></math> equations.
    markup = re.sub("(<math>.*?)\[(.*?</math>)", "\\1MATH___OPEN\\2", markup)
    markup = re.sub("(<math>.*?)\](.*?</math>)", "\\1MATH___CLOSE\\2", markup)
    markup = markup.replace("[", "")
    markup = markup.replace("]", "")
    markup = markup.replace("MATH___OPEN", "[")
    markup = markup.replace("MATH___CLOSE", "]")
    # a) Strip references.
    # b) Strip <ref></ref> tags.
    # c) Strip <ref name="" /> tags.
    # d) Replace --REF--(12) by [12].
    # e) Remove space between [12] and trailing punctuation .,
    # f) Remove HTML comment <!-- -->
    # g) Keep the Latin Extended-B template: {{latinx| }}
    # h) Strip Middle-Earth references.
    # i) Keep quotes: {{quote| }}
    # j) Remove templates
    markup = re.sub(self.re["reference"], "", markup) # a
    markup = re.sub("</{0,1}ref.*?>", "", markup) # b
    markup = re.sub("<ref name=\".*?\" {0,1}/>", "", markup) # c
    markup = re.sub(self.ref+"\(([0-9]*?)\)", "[\\1] ", markup) # d
    markup = re.sub("\] ([,.\"\?\)])", "]\\1", markup) # e
    markup = re.sub(self.re["comment"], "", markup) # f
    markup = re.sub("\{\{latinx\|(.*?)\}\}", "\\1", markup) # g
    markup = re.sub("\{\{ME-ref.*?\}\}", "", markup) # h
    markup = re.sub("\{\{quote\|(.*?)\}\}", "\"\\1\"", markup) # i
    markup = re.sub(re.compile("\{\{.*?\}\}", re.DOTALL), "", markup) # j
    markup = markup.replace("}}", "")
    # Collapse multiple spaces between words,
    # unless they appear in preformatted text.
    markup = re.sub("<br.*?/{0,1}>", " ", markup)
    markup = markup.split("\n")
    for i in range(len(markup)):
        # Lines starting with a space are preformatted text — leave intact.
        if not markup[i].startswith(" "):
            markup[i] = re.sub(r"[ ]+", " ", markup[i])
    markup = "\n".join(markup)
    markup = markup.replace(" .", ".")
    # Strip all HTML except <math> tags.
    if self.full_strip:
        markup = strip_tags(markup, exclude=["math"], linebreaks=True)
    markup = markup.strip()
    return markup
def get_std_xy_dataset_statistics(x_values, y_values, expect_negative_correlation = False, STDev_cutoff = 1.0):
    '''Calls parse_csv and returns the analysis in a format similar to get_xy_dataset_statistics in klab.stats.misc.'''
    assert(len(x_values) == len(y_values))
    # Build an in-memory CSV (ID, X, Y) for parse_csv; IDs start at 1.
    csv_lines = ['ID,X,Y']
    for idx in xrange(len(x_values)):
        csv_lines.append(','.join([str(idx + 1), str(x_values[idx]), str(y_values[idx])]))
    data = parse_csv(csv_lines, expect_negative_correlation = expect_negative_correlation, STDev_cutoff = STDev_cutoff)
    # parse_csv must report exactly one prediction series, named 'Y'.
    assert(len(data['predictions']) == 1)
    assert(1 in data['predictions'])
    assert(data['predictions'][1]['name'] == 'Y')
    summary = data['predictions'][1]
    stats = dict((dst_name, summary[src_name]) for src_name, dst_name in field_name_mapper)
    # Collapse the warnings list to one newline-separated string (or None).
    if stats['std_warnings']:
        stats['std_warnings'] = '\n'.join(stats['std_warnings'])
    else:
        stats['std_warnings'] = None
    return stats
def __parse_entry(entry_line):
    """Parse the SOFT file entry name line that starts with '^', '!' or '#'.

    Args:
        entry_line (:obj:`str`): Line from SOFT to be parsed.

    Returns:
        :obj:`2-tuple`: Type of entry, value of entry.
    """
    if entry_line.startswith("!"):
        # Drop the '!<prefix>_' marker, e.g. '!Series_title' -> 'title'.
        entry_line = sub(r"!\w*?_", '', entry_line)
    else:
        # Drop the leading '^' or '#' sigil.
        entry_line = entry_line.strip()[1:]
    fields = [part.strip() for part in entry_line.split("=", 1)]
    if len(fields) == 2:
        entry_type, entry_name = fields
    else:
        # No '=' present: the whole line is the type, with an empty value.
        entry_type, entry_name = fields[0], ''
    return entry_type, entry_name
def create_application(self, description=None):
    """
    Creates an application and sets the helper's current
    app_name to the created application.

    :param description: optional description for the new application.
    """
    out("Creating application " + str(self.app_name))
    self.ebs.create_application(self.app_name, description=description)
def getJSMinimumVolume(self, **kw):
    """Try convert the MinimumVolume to 'ml' or 'g' so that JS has an
    easier time working with it. If conversion fails, return raw value.
    """
    default = self.Schema()['MinimumVolume'].get(self)
    try:
        # Expect a "<magnitude> <unit>" string, e.g. "10 ml".
        magnitude, unit = default.split(' ', 1)
        mgdefault = mg(float(magnitude), unit)
    except Exception:
        # Unparseable (None / missing unit / bad number): use a zero
        # volume so the unit conversions below still run.
        # (Was a bare `except:`, which also swallowed SystemExit.)
        mgdefault = mg(0, 'ml')
    for target_unit in ('ml', 'g'):
        try:
            return str(mgdefault.ounit(target_unit))
        except Exception:
            # This unit is not convertible; try the next one.
            continue
    return str(default)
def whitelist(ctx, whitelist_account, account):
    """ Add an account to a whitelist
    """
    target = Account(account, blockchain_instance=ctx.blockchain)
    tx = target.whitelist(whitelist_account)
    print_tx(tx)
def resume_job(job_id):
    """Resume the job identified by *job_id* and return it as JSON."""
    try:
        current_app.apscheduler.resume_job(job_id)
        resumed = current_app.apscheduler.get_job(job_id)
        return jsonify(resumed)
    except JobLookupError:
        # Unknown job id: report a 404 payload.
        return jsonify(dict(error_message='Job %s not found' % job_id), status=404)
    except Exception as err:
        # Anything else becomes a 500 payload with the error text.
        return jsonify(dict(error_message=str(err)), status=500)
def watch(self, limit=None, timeout=None):
    """Block and watch the clipboard for changes.

    Invokes ``self.callback`` with each new value, at most *limit* times,
    until *timeout* seconds have elapsed (forever when *timeout* is falsy).
    """
    started = time.time()
    changes = 0
    while not timeout or time.time() - started < timeout:
        current = self.read()
        if current != self.temp:
            changes += 1
            self.callback(current)
            if changes == limit:
                break
            self.temp = current
        time.sleep(self.interval)
def trySwitchStatement(self, block):
    """Check for default and case keywords and assume we are in a switch statement.

    Try to find a previous default, case or switch and return its indentation or
    None if not found.
    """
    case_pattern = r'^\s*(default\s*|case\b.*):'
    if not re.match(case_pattern, block.text()):
        return None
    for prev_block in self.iterateBlocksBackFrom(block.previous()):
        text = prev_block.text()
        if re.match(case_pattern, text):
            dbg("trySwitchStatement: success in line %d" % prev_block.blockNumber())
            return self._lineIndent(text)
        if re.match(r"^\s*switch\b", text):
            base = self._lineIndent(text)
            # Optionally indent one level past the switch itself.
            return self._increaseIndent(base) if CFG_INDENT_CASE else base
    return None
def dumpJSON(self):
    """
    Encodes current parameters to JSON compatible dictionary.

    Collects the instrument setup (exposure counts, readout options,
    binning, window coordinates) into a flat dict, reconciling mutually
    exclusive options: nodding only in clear mode, no multipliers with
    clear, and drift-mode restrictions applied.
    """
    numexp = self.number.get()
    expTime, _, _, _, _ = self.timing()
    # NOTE(review): `numexp` is normalised to -1 here but never used below
    # (the dict stores self.number.value() instead) — confirm intent.
    if numexp == 0:
        numexp = -1
    data = dict(
        numexp=self.number.value(),
        app=self.app.value(),
        led_flsh=self.led(),
        dummy_out=self.dummy(),
        fast_clks=self.fastClk(),
        readout=self.readSpeed(),
        dwell=self.expose.value(),
        exptime=expTime,
        oscan=self.oscan(),
        oscany=self.oscany(),
        xbin=self.wframe.xbin.value(),
        ybin=self.wframe.ybin.value(),
        multipliers=self.nmult.getall(),
        clear=self.clear()
    )
    # only allow nodding in clear mode, even if GUI has got confused
    if data['clear'] and self.nodPattern:
        data['nodpattern'] = self.nodPattern
    # no mixing clear and multipliers, no matter what GUI says
    if data['clear']:
        data['multipliers'] = [1 for i in self.nmult.getall()]
    # add window mode
    if not self.isFF():
        if self.isDrift():
            # no clear, multipliers or oscan in drift
            for setting in ('clear', 'oscan', 'oscany'):
                data[setting] = 0
            data['multipliers'] = [1 for i in self.nmult.getall()]
            # Drift windows: one left and one right x-start per window.
            for iw, (xsl, xsr, ys, nx, ny) in enumerate(self.wframe):
                data['x{}start_left'.format(iw+1)] = xsl
                data['x{}start_right'.format(iw+1)] = xsr
                data['y{}start'.format(iw+1)] = ys
                data['y{}size'.format(iw+1)] = ny
                data['x{}size'.format(iw+1)] = nx
        else:
            # no oscany in window mode
            data['oscany'] = 0
            # Windowed mode: four quadrant x-starts per window.
            for iw, (xsll, xsul, xslr, xsur, ys, nx, ny) in enumerate(self.wframe):
                data['x{}start_upperleft'.format(iw+1)] = xsul
                data['x{}start_lowerleft'.format(iw+1)] = xsll
                data['x{}start_upperright'.format(iw+1)] = xsur
                data['x{}start_lowerright'.format(iw+1)] = xslr
                data['y{}start'.format(iw+1)] = ys
                data['x{}size'.format(iw+1)] = nx
                data['y{}size'.format(iw+1)] = ny
    return data
def load_module_from_file_object(fp, filename='<unknown>', code_objects=None, fast_load=False,
                                 get_code=True):
    """load a module from a file object without importing it.

    See :func:load_module for a list of return values.

    Reads the pyc header (magic number, timestamp/hash, optional source
    size) and unmarshals the code object when `get_code` is True.
    Closes `fp` before returning.
    """
    if code_objects is None:
        code_objects = {}
    timestamp = 0
    try:
        magic = fp.read(4)
        magic_int = magics.magic2int(magic)
        # For reasons I don't understand, PyPy 3.2 stores a magic
        # of '0'...  The two values below are for Python 2.x and 3.x respectively
        if magic[0:1] in ['0', b'0']:
            magic = magics.int2magic(3180+7)
        try:
            # FIXME: use the internal routine below
            float_version = float(magics.versions[magic][:3])
            # float_version = magics.magic_int2float(magic_int)
        except KeyError:
            # Unknown magic: distinguish Pyston, garbage, and truncation.
            if magic_int in (2657, 22138):
                raise ImportError("This smells like Pyston which is not supported.")
            if len(magic) >= 2:
                raise ImportError("Unknown magic number %s in %s" %
                                  (ord(magic[0:1])+256*ord(magic[1:2]), filename))
            else:
                raise ImportError("Bad magic number: '%s'" % magic)
        if magic_int in (3010, 3020, 3030, 3040, 3050, 3060, 3061, 3361, 3371):
            raise ImportError("%s is interim Python %s (%d) bytecode which is "
                              "not supported.\nFinal released versions are "
                              "supported." % (
                                  filename, magics.versions[magic],
                                  magics.magic2int(magic)))
        elif magic_int == 62135:
            # Dropbox-modified Python 2.5: handled by a dedicated fixer.
            fp.seek(0)
            return fix_dropbox_pyc(fp)
        elif magic_int == 62215:
            raise ImportError("%s is a dropbox-hacked Python %s (bytecode %d).\n"
                              "See https://github.com/kholia/dedrop for how to "
                              "decrypt." % (
                                  filename, magics.versions[magic],
                                  magics.magic2int(magic)))
        try:
            # print version
            ts = fp.read(4)
            my_magic_int = magics.magic2int(imp.get_magic())
            magic_int = magics.magic2int(magic)
            # PEP 552 (3.7+) pyc headers may carry a source hash instead of
            # a timestamp; read and discard the appropriate words.
            if magic_int == 3393:
                timestamp = 0
                _ = unpack("<I", ts)[0]           # hash word 1
                _ = unpack("<I", fp.read(4))[0]   # hash word 2
            elif magic_int in (3394, 3401):
                timestamp = 0
                _ = unpack("<I", fp.read(4))[0]   # pep552_bits
            else:
                timestamp = unpack("<I", ts)[0]
            # Note: a higher magic number doesn't necessarily mean a later
            # release.  At Python 3.0 the magic number decreased
            # significantly.  Hence the range below. Also note inclusion of
            # the size info, occurred within a Python major/minor
            # release. Hence the test on the magic value rather than
            # PYTHON_VERSION, although PYTHON_VERSION would probably work.
            if 3200 <= magic_int < 20121 and magic_int not in (5892, 11913, 39170, 39171):
                source_size = unpack("<I", fp.read(4))[0] # size mod 2**32
            else:
                source_size = None
            if get_code:
                if my_magic_int == magic_int:
                    # Same interpreter version: the fast path via marshal.
                    bytecode = fp.read()
                    co = marshal.loads(bytecode)
                elif fast_load:
                    co = xdis.marsh.load(fp, magics.magicint2version[magic_int])
                else:
                    # Cross-version unmarshal (slow but portable).
                    co = xdis.unmarshal.load_code(fp, magic_int, code_objects)
                pass
            else:
                co = None
        except:
            kind, msg = sys.exc_info()[0:2]
            import traceback
            traceback.print_exc()
            raise ImportError("Ill-formed bytecode file %s\n%s; %s"
                              % (filename, kind, msg))
    finally:
        # Always close the caller's file object, even on failure.
        fp.close()
    return float_version, timestamp, magic_int, co, is_pypy(magic_int), source_size
def age(self):
    """
    Move this composer one step closer to its EOL.

    ``rounds == 0`` means it never decomposes; ``rounds == 1`` stops the
    run loop; larger values simply count down.
    """
    remaining = self.rounds
    if remaining > 1:
        self.rounds = remaining - 1
    elif remaining == 1:
        self.do_run = False
def proton_hydroxide_free_energy(temperature, pressure, pH):
    """Returns the Gibbs free energy of proton in bulk solution.

    Parameters
    ----------
    pH : pH of bulk solution
    temperature : numeric
        temperature in K
    pressure : numeric
        pressure in mbar

    Returns
    -------
    G_H, G_OH : Gibbs free energy of proton and hydroxide.
    """
    G_H2 = GasMolecule('H2').get_free_energy(temperature=temperature, pressure=pressure)
    G_H2O = GasMolecule('H2O').get_free_energy(temperature=temperature)
    # Proton from 1/2 H2, shifted by the Nernstian pH term.
    G_H = 0.5 * G_H2 - ((R * temperature) / (z * F)) * ln10 * pH
    # Water equilibration gives hydroxide directly; Kw is not needed.
    G_OH = G_H2O - G_H
    return (G_H, G_OH)
def get_state_data(cls, entity):
    """
    Returns the state data for the given entity.
    This also works for unmanaged entities.
    """
    attr_iter = get_domain_class_attribute_iterator(type(entity))
    return dict(
        (attr, get_nested_attribute(entity, attr.entity_attr))
        for attr in attr_iter
        if attr.entity_attr is not None
    )
def parse(self, text, **kwargs):
    '''Parse the given text and return result from MeCab.

    :param text: the text to parse.
    :type text: str
    :param as_nodes: return generator of MeCabNodes if True;
        or string if False.
    :type as_nodes: bool, defaults to False
    :param boundary_constraints: regular expression for morpheme boundary
        splitting; if non-None and feature_constraints is None, then
        boundary constraint parsing will be used.
    :type boundary_constraints: str or re
    :param feature_constraints: tuple containing tuple instances of
        target morpheme and corresponding feature string in order
        of precedence; if non-None and boundary_constraints is None,
        then feature constraint parsing will be used.
    :type feature_constraints: tuple
    :return: A single string containing the entire MeCab output;
        or a Generator yielding the MeCabNode instances.
    :raises: MeCabError
    '''
    # --- validate the input text ---
    if text is None:
        logger.error(self._ERROR_EMPTY_STR)
        raise MeCabError(self._ERROR_EMPTY_STR)
    if not isinstance(text, str):
        logger.error(self._ERROR_NOTSTR)
        raise MeCabError(self._ERROR_NOTSTR)
    if 'partial' in self.options and not text.endswith("\n"):
        logger.error(self._ERROR_MISSING_NL)
        raise MeCabError(self._ERROR_MISSING_NL)

    # --- validate constraint keyword arguments ---
    # Boundary constraints take precedence; feature constraints are only
    # checked when no boundary constraint was supplied.
    if self._KW_BOUNDARY in kwargs:
        constraint = kwargs[self._KW_BOUNDARY]
        if not isinstance(constraint, (self._REGEXTYPE, str)):
            logger.error(self._ERROR_BOUNDARY)
            raise MeCabError(self._ERROR_BOUNDARY)
    elif self._KW_FEATURE in kwargs:
        constraint = kwargs[self._KW_FEATURE]
        if not isinstance(constraint, tuple):
            logger.error(self._ERROR_FEATURE)
            raise MeCabError(self._ERROR_FEATURE)

    # --- dispatch to the node-generator or string parser ---
    if kwargs.get(self._KW_ASNODES, False):
        return self.__parse_tonodes(text, **kwargs)
    return self.__parse_tostr(text, **kwargs)
def stat( self, *args ):
    '''Check process completion and consume pending I/O data.

    Polls the child process; once it has exited, removes the expiration
    timeout and per-fd handlers from the ioloop, schedules `on_finish`,
    and drains any data still buffered on the stream fds.
    '''
    self.pipe.poll()
    if not self.pipe.returncode is None:
        '''cleanup handlers and timeouts'''
        if not self.expiration is None:
            self.ioloop.remove_timeout(self.expiration)
        for fd, dest in self.streams:
            self.ioloop.remove_handler(fd)
        '''schedulle callback (first try to read all pending data)'''
        self.ioloop.add_callback(self.on_finish)
        # Drain remaining output from each stream fd until EOF or error.
        for fd, dest in self.streams:
            while True:
                try:
                    data = os.read(fd, 4096)
                    if len(data) == 0:
                        break
                    # NOTE(review): leftover data is printed rather than
                    # written to `dest` — looks like debug output; confirm
                    # the intended sink.
                    print(data.rstrip())
                except:
                    # Read failed (fd closed/would block): stop draining.
                    break
def first(self, callback=None, default=None):
    """
    Get the first item of the collection.

    When *callback* is given, return the first item for which it is
    truthy, falling back to ``value(default)`` when nothing matches.

    :param default: The default value
    :type default: mixed
    """
    if callback is None:
        return self.items[0] if len(self.items) > 0 else default
    for item in self.items:
        if callback(item):
            return item
    return value(default)
def unzip(zipped_file, output_directory=None,
          prefix="harvestingkit_unzip_", suffix=""):
    """Uncompress a zipped file from given filepath to an (optional) location.

    If no location is given, a temporary folder will be generated inside
    CFG_TMPDIR, prefixed with "apsharvest_unzip_".

    :param zipped_file: path of the archive to extract.
    :param output_directory: destination folder; a temporary one is
        created when falsy.
    :param prefix: prefix for the generated temporary folder name.
    :param suffix: suffix for the generated temporary folder name.
    :return: path of the directory the archive was extracted into.
    """
    if not output_directory:
        # We create a temporary directory to extract our stuff in
        try:
            output_directory = mkdtemp(suffix=suffix,
                                       prefix=prefix)
        except Exception:
            # Py3-compatible except syntax (was `except Exception, e`).
            try:
                os.removedirs(output_directory)
            except TypeError:
                # output_directory is still None here, so this is a no-op.
                pass
            # Bare raise preserves the original traceback (`raise e` lost it).
            raise
    return _do_unzip(zipped_file, output_directory)
def pair(self):
    """
    Returns a callable and an iterable respectively. Those can be used to
    both transmit a message and/or iterate over incoming messages, that were
    sent by a pair socket. Note that the iterable returns as many parts as
    sent by a pair. Also, the sender function has a ``print`` like signature,
    with an infinite number of arguments. Each one being a part of the
    complete message.

    :rtype: (function, generator)
    """
    pair_socket = self.__sock(zmq.PAIR)
    sender = self.__send_function(pair_socket)
    receiver = self.__recv_generator(pair_socket)
    return sender, receiver
def _adjust_router_list_for_global_router(self, routers):
    """
    Pushes 'Global' routers to the end of the router list, so that
    deleting default route occurs before deletion of external nw subintf.

    :param routers: list of router dicts; reordered in place.
    """
    #ToDo(Hareesh): Simplify if possible
    # Collect first: removing from `routers` while iterating it skips the
    # element that slides into the removed slot (the original looped over
    # and mutated the same list, missing adjacent global routers).
    global_routers = [r for r in routers
                      if r[ROUTER_ROLE_ATTR] == c_constants.ROUTER_ROLE_GLOBAL]
    for r in global_routers:
        LOG.debug("Global router:%s found. Moved to the end of list "
                  "for processing", r['id'])
        routers.remove(r)
        routers.append(r)
def _run_eos_cmds(self, commands, switch):
    """Execute/sends a CAPI (Command API) command to EOS.

    This method is useful for running show commands that require no
    prefix or postfix commands.

    :param commands : List of commands to be executed on EOS.
    :param switch: Endpoint on the Arista switch to be configured
    :return: list of per-command results, or None when execution failed.
    """
    LOG.info(_LI('Executing command on Arista EOS: %s'), commands)
    try:
        # this returns array of return values for every command in
        # commands list
        results = switch.execute(commands)
        LOG.info(_LI('Results of execution on Arista EOS: %s'), results)
    except Exception:
        # Log and return None: callers treat execution as best-effort.
        msg = (_('Error occurred while trying to execute '
                 'commands %(cmd)s on EOS %(host)s') %
               {'cmd': commands, 'host': switch})
        LOG.exception(msg)
        return None
    return results
def save(self, **kwargs):
    """Overrides models.Model.save.

    - Delete formatted photos if format save and not now created
      (because of possible changes)
    """
    if self.id:
        # Existing format: drop derived photos so they are regenerated.
        for formatted_photo in self.formatedphoto_set.all():
            formatted_photo.delete()
    super(Format, self).save(**kwargs)
def _ParseFSMVariables(self, template):
    """Extracts Variables from start of template file.

    Values are expected as a contiguous block at the head of the file.
    These will be line separated from the State definitions that follow.

    Args:
        template: Valid template file, with Value definitions at the top.

    Raises:
        TextFSMTemplateError: If syntax or semantic errors are found.
    """
    self.values = []
    for line in template:
        self._line_num += 1
        line = line.rstrip()
        # Blank line signifies end of Value definitions.
        # (Early return: the rest of the file is parsed by other methods.)
        if not line:
            return
        # Skip commented lines.
        if self.comment_regex.match(line):
            continue
        if line.startswith('Value '):
            try:
                value = TextFSMValue(
                    fsm=self, max_name_len=self.MAX_NAME_LEN,
                    options_class=self._options_cls)
                value.Parse(line)
            except TextFSMTemplateError as error:
                # Re-raise with the current line number for context.
                raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))
            if value.name in self.header:
                raise TextFSMTemplateError(
                    "Duplicate declarations for Value '%s'. Line: %s."
                    % (value.name, self._line_num))
            try:
                self._ValidateOptions(value)
            except TextFSMTemplateError as error:
                raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))
            self.values.append(value)
            self.value_map[value.name] = value.template
        # The line has text but without the 'Value ' prefix.
        elif not self.values:
            raise TextFSMTemplateError('No Value definitions found.')
        else:
            raise TextFSMTemplateError(
                'Expected blank line after last Value entry. Line: %s.'
                % (self._line_num))
def instruction_RTI(self, opcode):
    """
    The saved machine state is recovered from the hardware stack and control
    is returned to the interrupted program. If the recovered E (entire) bit
    is clear, it indicates that only a subset of the machine state was saved
    (return address and condition codes) and only that subset is recovered.

    source code forms: RTI

    CC bits "HNZVC": -----
    """
    # Pull order must mirror the push order used when the interrupt was
    # taken: CC first, then (if E set) A, B, DP, X, Y, U, and finally PC.
    cc = self.pull_byte(self.system_stack_pointer) # CC
    self.set_cc(cc)
    # E (entire) flag: the full state was stacked, so restore all of it.
    if self.E:
        self.accu_a.set(
            self.pull_byte(self.system_stack_pointer) # A
        )
        self.accu_b.set(
            self.pull_byte(self.system_stack_pointer) # B
        )
        self.direct_page.set(
            self.pull_byte(self.system_stack_pointer) # DP
        )
        self.index_x.set(
            self.pull_word(self.system_stack_pointer) # X
        )
        self.index_y.set(
            self.pull_word(self.system_stack_pointer) # Y
        )
        self.user_stack_pointer.set(
            self.pull_word(self.system_stack_pointer) # U
        )
        self.program_counter.set(
            self.pull_word(self.system_stack_pointer) # PC
        )
def make_python_identifier(string, namespace=None, reserved_words=None,
                           convert='drop', handle='force'):
    """Convert an arbitrary string into a valid, unique Python identifier.

    If *string* was already translated, its existing identifier is returned.
    If the generated identifier collides with an existing namespace value,
    a reserved word, or a Python keyword, it is suffixed with ``_1``,
    ``_2``, ... (``handle='force'``) or a ``NameError`` is raised
    (``handle='throw'``).

    Parameters
    ----------
    string : str
        The text to be converted into a valid python identifier.
    namespace : dict, optional
        Map of existing translations into python safe identifiers.  Ensures
        two distinct strings never resolve to the same identifier.
    reserved_words : list of str, optional
        Extra words that may not be used as identifiers (e.g. because they
        are also the names of libraries).
    convert : {'drop', 'hex'}
        What to do with characters that are invalid in identifiers:
        ``'hex'`` converts them to their hexadecimal representation,
        ``'drop'`` removes them altogether.
    handle : {'force', 'throw'}
        Namespace-conflict strategy: ``'force'`` appends the lowest ``_n``
        that avoids the conflict; ``'throw'`` raises ``NameError``.

    Returns
    -------
    identifier : str
        A valid python identifier based on the input string.
    namespace : dict
        The updated translation map, including the passed-in *string*.

    Examples
    --------
    >>> make_python_identifier('Capital')
    ('capital', {'Capital': 'capital'})
    >>> make_python_identifier('for')[0]
    'for_1'
    >>> make_python_identifier('H@t tr!ck')[0]
    'ht_trck'
    >>> make_python_identifier('H@t tr!ck', convert='hex')[0]
    'h40t_tr21ck'

    References
    ----------
    Identifiers must follow the convention outlined here:
    https://docs.python.org/3/reference/lexical_analysis.html#identifiers
    """
    if namespace is None:
        namespace = dict()
    if reserved_words is None:
        reserved_words = list()

    # Already translated: reuse the previous identifier.
    if string in namespace:
        return namespace[string], namespace

    # Lowercase, trim, and collapse whitespace runs into single underscores.
    s = string.lower().strip()
    s = re.sub(r'\s+', '_', s)

    if convert == 'hex':
        # Replace each invalid character by its two-digit hex code.  This is
        # the Python 3 equivalent of the old str.encode('hex') idiom, which
        # no longer exists; \W matches anything outside [letters, digits, _].
        s = ''.join(format(ord(c), '02x') if re.match(r'\W', c) else c
                    for c in s)
    elif convert == 'drop':
        # Remove invalid characters entirely.  \W replaces the \p{l}\p{m}\p{n}
        # classes of the original, which the stdlib `re` module rejects.
        s = re.sub(r'\W', '', s)

    # Identifiers cannot start with a digit; leading underscores are fine.
    s = re.sub(r'^[0-9]+', '', s)

    # Disambiguate against keywords, existing identifiers and reserved words.
    while (s in keyword.kwlist or
           s in namespace.values() or
           s in reserved_words):
        if handle == 'throw':
            raise NameError(s + ' already exists in namespace or is a reserved word')
        if handle == 'force':
            match = re.match(r'(.*?)_(\d+)$', s)
            if match:
                # Increment an existing numeric suffix.  (The previous code
                # used s.strip('_' + i), which strips *characters*, so e.g.
                # 'x1_1' wrongly became 'x_2' instead of 'x1_2'.)
                s = '{}_{}'.format(match.group(1), int(match.group(2)) + 1)
            else:
                s += '_1'

    namespace[string] = s
    return s, namespace
If the input string is in the namespace, return its value.
If the python identifier created is already in the namespace,
but the input string is not (ie, two similar strings resolve to
the same python identifier)
or if the identifier is a reserved word in the reserved_words
list, or is a python default reserved word,
adds _1, or if _1 is in the namespace, _2, etc.
Parameters
----------
string : <basestring>
The text to be converted into a valid python identifier
namespace : <dictionary>
Map of existing translations into python safe identifiers.
This is to ensure that two strings are not translated into
the same python identifier
reserved_words : <list of strings>
List of words that are reserved (because they have other meanings
in this particular program, such as also being the names of
libraries, etc.
convert : <string>
Tells the function what to do with characters that are not
valid in python identifiers
- 'hex' implies that they will be converted to their hexadecimal
representation. This is handy if you have variables that
have a lot of reserved characters, or you don't want the
name to be dependent on when things were added to the
namespace
- 'drop' implies that they will just be dropped altogether
handle : <string>
Tells the function how to deal with namespace conflicts
- 'force' will create a representation which is not in conflict
by appending _n to the resulting variable where n is
the lowest number necessary to avoid a conflict
- 'throw' will raise an exception
Returns
-------
identifier : <string>
A valid python identifier based on the input string
namespace : <dictionary>
An updated map of the translations of words to python identifiers,
including the passed in 'string'.
Examples
--------
>>> make_python_identifier('Capital')
('capital', {'Capital': 'capital'})
>>> make_python_identifier('multiple words')
('multiple_words', {'multiple words': 'multiple_words'})
>>> make_python_identifier('multiple spaces')
('multiple_spaces', {'multiple spaces': 'multiple_spaces'})
When the name is a python keyword, add '_1' to differentiate it
>>> make_python_identifier('for')
('for_1', {'for': 'for_1'})
Remove leading and trailing whitespace
>>> make_python_identifier(' whitespace ')
('whitespace', {' whitespace ': 'whitespace'})
Remove most special characters outright:
>>> make_python_identifier('H@t tr!ck')
('ht_trck', {'H@t tr!ck': 'ht_trck'})
Replace special characters with their hex representations
>>> make_python_identifier('H@t tr!ck', convert='hex')
('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'})
remove leading digits
>>> make_python_identifier('123abc')
('abc', {'123abc': 'abc'})
already in namespace
>>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'})
('variable', {'Variable$': 'variable'})
namespace conflicts
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'})
('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'})
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable',
>>> 'Variable%': 'variable_1'})
('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'})
throw exception instead
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw')
Traceback (most recent call last):
...
NameError: variable already exists in namespace or is a reserved word
References
----------
Identifiers must follow the convention outlined here:
https://docs.python.org/2/reference/lexical_analysis.html#identifiers |
def create_repository(self, repository_form=None):
    """Create and return a new ``Repository`` from *repository_form*.

    The form must originate from ``get_repository_form_for_create()`` on
    this session and may be used in only one create transaction.

    :param repository_form: the form for this ``Repository``
    :type repository_form: ``osid.repository.RepositoryForm``
    :return: the new ``Repository``
    :rtype: ``osid.repository.Repository``
    :raise: ``IllegalState`` -- form already used in a create transaction
    :raise: ``InvalidArgument`` -- wrong form type, update-only form, or
        invalid form elements
    :raise: ``NullArgument`` -- ``repository_form`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    :raise: ``Unsupported`` -- form did not originate from this session

    *compliance: mandatory -- This method must be implemented.*
    """
    # Argument validation, cheapest checks first.
    if repository_form is None:
        raise NullArgument()
    if not isinstance(repository_form, abc_repository_objects.RepositoryForm):
        raise InvalidArgument('argument type is not a RepositoryForm')
    if repository_form.is_for_update():
        raise InvalidArgument('form is for update only, not create')

    # Each form may back exactly one create; unknown ids did not come
    # from this session.
    form_id = repository_form.get_id().get_identifier()
    try:
        already_created = self._forms[form_id] == CREATED
    except KeyError:
        raise Unsupported('form did not originate from this session')
    if already_created:
        raise IllegalState('form already used in a create transaction')

    if not repository_form.is_valid():
        raise InvalidArgument('one or more of the form elements is invalid')

    url_path = construct_url('objective_banks')
    try:
        result = self._post_request(url_path, repository_form._my_map)
    except Exception:
        raise  # OperationFailed
    self._forms[form_id] = CREATED
    return objects.Repository(result)
:param repository_form: the form for this ``Repository``
:type repository_form: ``osid.repository.RepositoryForm``
:return: the new ``Repository``
:rtype: ``osid.repository.Repository``
:raise: ``IllegalState`` -- ``repository_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``repository_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``repository_form`` did not originate from ``get_repository_form_for_create()``
*compliance: mandatory -- This method must be implemented.* |
def print_file(self, f=sys.stdout, file_format="cif", tw=0):
    """Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout.

    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `cif` or `json`.
    :param int tw: Tab width.
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "json":
        print(self._to_json(), file=f)
        return
    if file_format != "cif":
        return

    indent = tw * u" "
    for key in self.keys():
        value = self[key]
        if key == u"data":
            print(u"data_{}".format(value), file=f)
        elif key.startswith(u"comment"):
            print(value.strip(), file=f)
        elif key.startswith(u"loop_"):
            print(u"{}loop_".format(indent), file=f)
            self.print_loop(key, f, file_format, tw)
        elif value.endswith(u"\n"):
            # NMR-Star "multiline string": value goes between ';' delimiters.
            print(u"{}_{}".format(indent, key), file=f)
            print(u";{};".format(value), file=f)
        elif len(value.split()) > 1:
            # Multi-word values must be quoted.
            print(u"{}_{}\t '{}'".format(indent, key, value), file=f)
        else:
            print(u"{}_{}\t {}".format(indent, key, value), file=f)
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `cif` or `json`.
:param int tw: Tab width.
:return: None
:rtype: :py:obj:`None` |
def intersperse(e, iterable, n=1):
    """Intersperse filler element *e* among the items in *iterable*, leaving
    *n* items between each filler element.

    >>> list(intersperse('!', [1, 2, 3, 4, 5]))
    [1, '!', 2, '!', 3, '!', 4, '!', 5]

    >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
    [1, 2, None, 3, 4, None, 5]
    """
    if n == 0:
        raise ValueError('n must be > 0')

    iterator = iter(iterable)

    def generate():
        # Emit the filler *before* every group of n items except the first,
        # so no filler ever appears at the start or end of the stream.
        first_group = True
        while True:
            group = list(islice(iterator, n))
            if not group:
                return
            if not first_group:
                yield e
            first_group = False
            for item in group:
                yield item

    return generate()
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5] |
def can_use_cached_output(self, contentitem):
    """
    Tell whether the code should try reading cached output
    """
    plugin = contentitem.plugin
    caching_enabled = appsettings.FLUENT_CONTENTS_CACHE_OUTPUT
    # Cached output is usable only when caching is globally enabled, the
    # plugin allows it, and the item has been saved (has a primary key).
    return caching_enabled and plugin.cache_output and contentitem.pk
def unsafe(self):
    """True if the mapping is unsafe for an update.

    Applies only to local source. Returns True if the paths for source and
    destination are the same, or if one is a component of the other path.
    """
    scheme = urlparse(self.src_uri).scheme
    if scheme != '':
        # Remote source: the path-overlap concern does not apply.
        return False
    src = os.path.normpath(self.src_uri)
    dst = os.path.normpath(self.dst_path)
    common = os.path.commonprefix([src, dst])
    # If the common prefix equals either path, one contains the other
    # (or they are identical).
    return common in (src, dst)
Applies only to local source. Returns True if the paths for source and
destination are the same, or if one is a component of the other path. |
def authenticate(self, request):
    """
    Authenticate a client against all the backends configured in
    :attr:`authentication`, returning the first successful client
    (or ``None`` when every backend declines).
    """
    # Backends are instantiated lazily, one at a time, and the scan stops
    # at the first non-None result.
    return next(
        (client
         for client in (backend().authenticate(request)
                        for backend in self.authentication)
         if client is not None),
        None,
    )
:attr:`authentication`. |
def augment_initial_layout(self, base_response, initial_arguments=None):
    """Inject application state (initial argument values) into the serialized
    Dash layout response.

    :param base_response: HTTP response whose ``.data`` holds the JSON layout.
    :param initial_arguments: optional dict -- or JSON-encoded string -- of
        values to substitute into layout elements with matching ids.
    :return: tuple of (response body, mimetype).
    """
    # NOTE(review): the trailing `and False` makes this early-exit path
    # unreachable; it looks like a deliberately disabled fast path --
    # confirm intent before removing it.
    if self.use_dash_layout() and not initial_arguments and False:
        return base_response.data, base_response.mimetype
    # Adjust the base layout response
    baseDataInBytes = base_response.data
    baseData = json.loads(baseDataInBytes.decode('utf-8'))
    # Also add in any initial arguments (accept a JSON-encoded string too)
    if initial_arguments:
        if isinstance(initial_arguments, str):
            initial_arguments = json.loads(initial_arguments)
    # Walk tree. If at any point we have an element whose id
    # matches, then replace any named values at this level
    reworked_data = self.walk_tree_and_replace(baseData, initial_arguments)
    response_data = json.dumps(reworked_data,
                               cls=PlotlyJSONEncoder)
    return response_data, base_response.mimetype | Add application state to initial values
def parse_non_selinux(parts):
    """
    Parse part of an ls output line that isn't selinux.

    Args:
        parts (list): A four element list of strings representing the initial
            parts of an ls line after the permission bits. The parts are link
            count, owner, group, and everything else.

    Returns:
        A dict containing links, owner, group, date, and name. If the line
        represented a device, major and minor numbers are included. Otherwise,
        size is included. If the raw name was a symbolic link, link is
        included.
    """
    links, owner, group, last = parts
    entry = {
        "links": int(links),
        "owner": owner,
        "group": group,
    }

    # Device numbers only go to 256, so a comma within the first four
    # characters means "major, minor" device numbers; otherwise the next
    # field is the size.
    if "," in last[:4]:
        major, minor, rest = last.split(None, 2)
        entry["major"] = int(major.rstrip(","))
        entry["minor"] = int(minor)
    else:
        size, rest = last.split(None, 1)
        entry["size"] = int(size)

    # The date part is always 12 characters regardless of content.
    entry["date"] = rest[:12]

    # Jump over the date and the following space to get the path part.
    path, link = parse_path(rest[13:])
    entry["name"] = path
    if link:
        entry["link"] = link

    return entry
Args:
parts (list): A four element list of strings representing the initial
parts of an ls line after the permission bits. The parts are link
count, owner, group, and everything else.
Returns:
A dict containing links, owner, group, date, and name. If the line
represented a device, major and minor numbers are included. Otherwise,
size is included. If the raw name was a symbolic link, link is
included. |
def _vis_calibrate(self, data):
"""Calibrate visible channels to reflectance."""
solar_irradiance = self['esun']
esd = self["earth_sun_distance_anomaly_in_AU"].astype(float)
factor = np.pi * esd * esd / solar_irradiance
res = data * factor
res.attrs = data.attrs
res.attrs['units'] = '1'
res.attrs['standard_name'] = 'toa_bidirectional_reflectance'
return res | Calibrate visible channels to reflectance. |
def WriteEventBody(self, event):
    """Writes the body of an event object to the spreadsheet.

    Renders one worksheet row: each configured output field becomes one
    cell, column widths are grown to fit, and datetime values are written
    with the sheet's datetime formatting.

    Args:
        event (EventObject): event.
    """
    for field_name in self._fields:
        # 'datetime' is formatted specially; every other field goes through
        # the dynamic field helper.
        if field_name == 'datetime':
            output_value = self._FormatDateTime(event)
        else:
            output_value = self._dynamic_fields_helper.GetFormattedField(
                event, field_name)

        # Strip characters that cannot be stored in XLSX/XML.
        output_value = self._RemoveIllegalXMLCharacters(output_value)

        # Auto adjust the column width based on the length of the output value.
        column_index = self._fields.index(field_name)
        self._column_widths.setdefault(column_index, 0)
        if field_name == 'datetime':
            # Width driven by the fixed timestamp format, not the cell value.
            column_width = min(
                self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2)
        else:
            column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2)

        # Columns only ever grow, clamped between the min and max widths.
        self._column_widths[column_index] = max(
            self._MIN_COLUMN_WIDTH, self._column_widths[column_index],
            column_width)
        self._sheet.set_column(
            column_index, column_index, self._column_widths[column_index])

        # write_datetime applies the worksheet's date format; presumably
        # _FormatDateTime can return either a datetime or a string -- the
        # isinstance check handles both. TODO confirm.
        if (field_name == 'datetime'
                and isinstance(output_value, datetime.datetime)):
            self._sheet.write_datetime(
                self._current_row, column_index, output_value)
        else:
            self._sheet.write(self._current_row, column_index, output_value)

    self._current_row += 1 | Writes the body of an event object to the spreadsheet.
Args:
event (EventObject): event. |
def _handle_watch_message(self, message):
    """
    Processes a binary message received from the watch and broadcasts the relevant events.

    :param message: A raw message from the watch, without any transport framing.
    :type message: bytes
    """
    if self.log_protocol_level is not None:
        logger.log(self.log_protocol_level, "<- %s", hexlify(message).decode())
    # Prepend any partial packet left over from the previous call.
    message = self.pending_bytes + message
    # A packet header (2-byte length + 2-byte endpoint) is 4 bytes; anything
    # shorter is necessarily incomplete and stays in pending_bytes.
    while len(message) >= 4:
        try:
            packet, length = PebblePacket.parse_message(message)
        except IncompleteMessage:
            self.pending_bytes = message
            break
        except:
            # At this point we've failed to deconstruct the message via normal means, but we don't want to end
            # up permanently desynced (because we wiped a partial message), nor do we want to get stuck (because
            # we didn't wipe anything). We therefore parse the packet length manually and skip ahead that far.
            # If the expected length is 0, we wipe everything to ensure forward motion (but we are quite probably
            # screwed).
            expected_length, = struct.unpack('!H', message[:2])
            if expected_length == 0:
                self.pending_bytes = b''
            else:
                self.pending_bytes = message[expected_length + 4:]
            raise
        self.event_handler.broadcast_event("raw_inbound", message[:length])
        if self.log_packet_level is not None:
            logger.log(self.log_packet_level, "<- %s", packet)
        # Consume the parsed packet, then broadcast it keyed on its type.
        message = message[length:]
        self.event_handler.broadcast_event((_EventType.Watch, type(packet)), packet)
        if length == 0:
            # A zero-length parse would loop forever; bail out defensively.
            break
    self.pending_bytes = message | Processes a binary message received from the watch and broadcasts the relevant events.
:param message: A raw message from the watch, without any transport framing.
:type message: bytes |
def check_for_maintenance(self):
    '''
    Returns True if the maintenance worker should be run now
    (i.e. the csetLog table has accumulated enough changesets),
    and False otherwise.
    :return:
    '''
    total_revisions = self.conn.get_one("SELECT count(revnum) FROM csetLog")[0]
    return total_revisions >= SIGNAL_MAINTENACE_CSETS
and False otherwise.
:return: |
def df_quantile(df, nb=100):
    """Return the *nb* evenly spaced quantiles (from 0 to 1 inclusive) of the
    columns of *df*.

    :param df: input pandas DataFrame.
    :param nb: number of quantiles to compute.
    :return: DataFrame with one row per quantile (integer index 0..nb-1) and
        the same columns as *df*.
    """
    quantiles = np.linspace(0, 1., nb)
    # A single vectorized DataFrame.quantile call replaces the old per-quantile
    # append loop: DataFrame.append was removed in pandas 2.0 and the loop was
    # O(nb^2) since each append copied the accumulated frame.
    res = df.quantile(quantiles)
    # Match the original output shape: rows indexed 0..nb-1 instead of by q.
    return res.reset_index(drop=True)
def _get_base_class_names(frame):
    """ Get baseclass names from the code object

    Scans the bytecode of *frame* up to its current instruction and collects
    the LOAD_NAME / LOAD_ATTR / LOAD_GLOBAL chains that spell out the base
    class expressions of an in-progress class statement.  Returns a list of
    dotted-name chains, e.g. ``[['module', 'Base'], ['Other']]``.
    """
    co, lasti = frame.f_code, frame.f_lasti
    code = co.co_code
    extends = []
    for (op, oparg) in op_stream(code, lasti):
        if op in dis.hasconst:
            # NOTE(review): hitting a string constant resets the collected
            # chain (presumably the class name / docstring constant marks a
            # boundary) -- confirm against the bytecode layout.
            if type(co.co_consts[oparg]) == str:
                extends = []
        elif op in dis.hasname:
            if dis.opname[op] == 'LOAD_NAME':
                extends.append(('name', co.co_names[oparg]))
            if dis.opname[op] == 'LOAD_ATTR':
                extends.append(('attr', co.co_names[oparg]))
            if dis.opname[op] == 'LOAD_GLOBAL':
                extends.append(('name', co.co_names[oparg]))
    items = []
    previous_item = []
    # Group the flat (kind, name) stream into dotted-name chains: a 'name'
    # op starts a new chain, an 'attr' op extends the current one.
    for t, s in extends:
        if t == 'name':
            if previous_item:
                items.append(previous_item)
            previous_item = [s]
        else:
            previous_item += [s]
    if previous_item:
        items.append(previous_item)
    return items | Get baseclass names from the code object
def palettize(self, colormap):
    """Palettize the current image using `colormap`.

    Replaces the luminance band with palette indices computed block-wise by
    ``colormap.palettize`` and stores the palette colors on ``self.palette``;
    the mode becomes "P" (or "PA" when an alpha band is present).

    .. note::

        Works only on "L" or "LA" images.
    """
    if self.mode not in ("L", "LA"):
        raise ValueError("Image should be grayscale to colorize")
    l_data = self.data.sel(bands=['L'])

    def _palettize(data):
        # returns data and palette, only need data
        return colormap.palettize(data)[0]

    # Lazy, per-chunk palettization of the luminance band.
    new_data = l_data.data.map_blocks(_palettize, dtype=l_data.dtype)
    self.palette = tuple(colormap.colors)

    if self.mode == "L":
        mode = "P"
    else:
        mode = "PA"
        # Re-attach the untouched alpha band after the palettized data.
        new_data = da.concatenate([new_data, self.data.sel(bands=['A'])], axis=0)

    self.data.data = new_data
    self.data.coords['bands'] = list(mode) | Palettize the current image using `colormap`.
.. note::
Works only on "L" or "LA" images. |
def off_policy_train_batch(self, batch_info: BatchInfo):
    """ Perform an 'off-policy' training step of sampling the replay buffer and gradient descent """
    self.model.train()
    # Sample a rollout from the replay buffer and move it to the target device.
    sampled = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps)
    rollout = sampled.to_device(self.device)
    # One gradient-descent step on the sampled rollout.
    step_result = self.algo.optimizer_step(
        batch_info=batch_info,
        device=self.device,
        model=self.model,
        rollout=rollout,
    )
    batch_info['sub_batch_data'].append(step_result)
def POST(self, id):
    """ Delete the todo item identified by *id*, then redirect to the index. """
    todo_id = int(id)
    model.del_todo(todo_id)
    raise web.seeother('/')
def main():
    """This is the CLI driver for ia-wrapper.

    Validates the global arguments, resolves the subcommand (and its
    aliases), builds a session from the global flags, and dispatches to the
    subcommand module's ``main()``.  Exits the process with the subcommand's
    status code.
    """
    args = docopt(__doc__, version=__version__, options_first=True)

    # Validate args.
    s = Schema({
        six.text_type: bool,
        '--config-file': Or(None, str),
        '<args>': list,
        '<command>': Or(str, lambda _: 'help'),
    })
    try:
        args = s.validate(args)
    except SchemaError as exc:
        print('{0}\n{1}'.format(str(exc), printable_usage(__doc__)), file=sys.stderr)
        sys.exit(1)

    # Get subcommand.
    cmd = args['<command>']
    if cmd in cmd_aliases:
        cmd = cmd_aliases[cmd]

    # 'ia help [command]' prints either the global or the subcommand usage.
    if (cmd == 'help') or (not cmd):
        if not args['<args>']:
            sys.exit(print(__doc__.strip(), file=sys.stderr))
        else:
            ia_module = load_ia_module(args['<args>'][0])
            sys.exit(print(ia_module.__doc__.strip(), file=sys.stderr))

    # 'configure' is allowed to create the config file; every other command
    # needs it to already exist when explicitly given.
    if cmd != 'configure' and args['--config-file']:
        if not os.path.isfile(args['--config-file']):
            print('--config-file should be a readable file.\n{0}'.format(
                printable_usage(__doc__)), file=sys.stderr)
            sys.exit(1)

    argv = [cmd] + args['<args>']

    # Build the session configuration from the global flags.
    config = dict()
    if args['--log']:
        config['logging'] = {'level': 'INFO'}
    elif args['--debug']:
        config['logging'] = {'level': 'DEBUG'}

    if args['--insecure']:
        config['general'] = dict(secure=False)
    session = get_session(config_file=args['--config-file'],
                          config=config,
                          debug=args['--debug'])

    # Dispatch to the subcommand module's main().
    ia_module = load_ia_module(cmd)
    try:
        sys.exit(ia_module.main(argv, session))
    except IOError as e:
        # Handle Broken Pipe errors.
        if e.errno == errno.EPIPE:
            sys.stderr.close()
            sys.stdout.close()
            sys.exit(0)
        else:
            raise | This is the CLI driver for ia-wrapper.
def get_default_config(self):
    """
    Return the default collector settings: the parent collector's defaults
    overlaid with the ElasticSearch-specific options.
    """
    config = super(ElasticSearchCollector, self).get_default_config()
    defaults = {
        'host': '127.0.0.1',
        'port': 9200,
        'user': '',
        'password': '',
        'instances': [],
        'scheme': 'http',
        'path': 'elasticsearch',
        'stats': ['jvm', 'thread_pool', 'indices'],
        'logstash_mode': False,
        'cluster': False,
    }
    config.update(defaults)
    return config
def unmarshal(compoundSignature, data, offset=0, lendian=True):
    """
    Unmarshals DBus encoded data.

    @type compoundSignature: C{string}
    @param compoundSignature: DBus signature specifying the encoded value types
    @type data: C{string}
    @param data: Binary data
    @type offset: C{int}
    @param offset: Offset within data at which data for compoundSignature
                   starts (used during recursion)
    @type lendian: C{bool}
    @param lendian: True if data is encoded in little-endian format
    @returns: (number_of_bytes_decoded, list_of_values)
    """
    decoded = []
    start = offset
    for ct in genCompleteTypes(compoundSignature):
        tcode = ct[0]
        # Advance past the alignment padding required by this type code.
        offset += len(pad[tcode](offset))
        consumed, value = unmarshallers[tcode](ct, data, offset, lendian)
        offset += consumed
        decoded.append(value)
    return offset - start, decoded
@type compoundSignature: C{string}
@param compoundSignature: DBus signature specifying the encoded value types
@type data: C{string}
@param data: Binary data
@type offset: C{int}
@param offset: Offset within data at which data for compoundSignature
starts (used during recursion)
@type lendian: C{bool}
@param lendian: True if data is encoded in little-endian format
@returns: (number_of_bytes_decoded, list_of_values) |
def aq_esc_telemetry_encode(self, time_boot_ms, seq, num_motors, num_in_seq, escid, status_age, data_version, data0, data1):
    '''
    Build a MAVLink AQ_ESC_TELEMETRY message carrying ESC32 telemetry for
    up to 4 motors.  Multiple messages may be sent in sequence when the
    system has more than 4 motors.  The packed per-ESC data layout is:

                    // unsigned int state :   3;
                    //  unsigned int vin :    12;  // x 100
                    //  unsigned int amps :   14;  // x 100
                    //  unsigned int rpm :    15;
                    //  unsigned int duty :   8;   // x (255/100)
                    //  - Data Version 2 -
                    //  unsigned int errors : 9;   // Bad detects error count
                    //  - Data Version 3 -
                    //  unsigned int temp :   9;   // (Deg C + 32) * 4
                    //  unsigned int errCode : 3;

    time_boot_ms              : Timestamp of the component clock since boot time in ms. (uint32_t)
    seq                       : Sequence number of message (first set of 4 motors is #1, next 4 is #2, etc). (uint8_t)
    num_motors                : Total number of active ESCs/motors on the system. (uint8_t)
    num_in_seq                : Number of active ESCs in this sequence (1 through this many array members will be populated with data) (uint8_t)
    escid                     : ESC/Motor ID (uint8_t)
    status_age                : Age of each ESC telemetry reading in ms compared to boot time. A value of 0xFFFF means timeout/no data. (uint16_t)
    data_version              : Version of data structure (determines contents). (uint8_t)
    data0                     : Data bits 1-32 for each ESC. (uint32_t)
    data1                     : Data bits 33-64 for each ESC. (uint32_t)
    '''
    return MAVLink_aq_esc_telemetry_message(time_boot_ms, seq, num_motors, num_in_seq, escid, status_age, data_version, data0, data1) | Sends ESC32 telemetry data for up to 4 motors. Multiple messages may
be sent in sequence when system has > 4 motors. Data
is described as follows:
// unsigned int state : 3;
// unsigned int vin : 12; // x 100
// unsigned int amps : 14; // x 100
// unsigned int rpm : 15;
// unsigned int duty : 8; // x (255/100)
// - Data Version 2 - //
unsigned int errors : 9; // Bad detects error
count // - Data Version 3
- // unsigned int temp
: 9; // (Deg C + 32) * 4
// unsigned int errCode : 3;
time_boot_ms : Timestamp of the component clock since boot time in ms. (uint32_t)
seq : Sequence number of message (first set of 4 motors is #1, next 4 is #2, etc). (uint8_t)
num_motors : Total number of active ESCs/motors on the system. (uint8_t)
num_in_seq : Number of active ESCs in this sequence (1 through this many array members will be populated with data) (uint8_t)
escid : ESC/Motor ID (uint8_t)
status_age : Age of each ESC telemetry reading in ms compared to boot time. A value of 0xFFFF means timeout/no data. (uint16_t)
data_version : Version of data structure (determines contents). (uint8_t)
data0 : Data bits 1-32 for each ESC. (uint32_t)
data1 : Data bits 33-64 for each ESC. (uint32_t) |
def _parse_msg(client, command, actor, args):
    """Parse a PRIVMSG or NOTICE and dispatch the corresponding event."""
    target, _, message = args.partition(' :')
    # CHANTYPES lists the characters that may start a channel name.
    channel_prefixes = client.server.features.get("CHANTYPES", "#")
    if target[0] in channel_prefixes:
        # Channel target: resolve to the channel object, falling back to
        # the lowercased name when the channel is unknown.
        target = client.server.get_channel(target) or target.lower()
    else:
        target = User(target)
    client.dispatch_event(command, actor, target, message)
def subscribe(self, queue=None, *levels):
    """
    Subscribe to the aggregated log stream.  On subscribe a ledis queue is
    fed with the logs of all running processes.  Always use the queue name
    returned by this method, even when *queue* was supplied.

    Note: subscribing to the same queue twice is legal, but two processes
    reading from the same queue is almost certainly a logic error.

    :param queue: Your unique queue name; one is generated when omitted
    :param levels: log levels to subscribe to
    :return: queue name to pull from
    """
    payload = {
        'queue': queue,
        'levels': list(levels),
    }
    self._subscribe_chk.check(payload)
    return self._client.json('logger.subscribe', payload)
logs. Always use the returned queue name from this method, even if u specified the queue name to use
Note: it is legal to subscribe to the same queue, but would be a bad logic if two processes are trying to
read from the same queue.
:param queue: Your unique queue name, otherwise, a one will get generated for your
:param levels:
:return: queue name to pull from |
def _add_observation(self, x_to_add, y_to_add):
    """Add observation to window, updating means/variance efficiently."""
    # Means are updated before variances; presumably the variance update
    # reads the refreshed means -- confirm against the helper implementations.
    self._add_observation_to_means(x_to_add, y_to_add)
    self._add_observation_to_variances(x_to_add, y_to_add)
    self.window_size += 1
def lyricsmode(song):
    """
    Returns the lyrics found in lyricsmode.com for the specified mp3 file or an
    empty string if not found.
    """
    # Strip URL-unsafe characters and turn spaces into underscores.
    translate = {
        URLESCAPE: '',
        ' ': '_'
    }
    artist = song.artist.lower()
    artist = normalize(artist, translate)
    title = song.title.lower()
    title = normalize(title, translate)

    # Collapse runs of underscores produced by the substitutions above.
    artist = re.sub(r'\_{2,}', '_', artist)
    title = re.sub(r'\_{2,}', '_', title)

    # lyricsmode files artists under a single prefix letter, skipping a
    # leading article.
    # NOTE(review): if normalize() already replaced spaces with underscores,
    # these ' '-based prefix checks can never match -- confirm against
    # normalize()'s semantics (perhaps they should test 'the_' / 'a_').
    if artist[0:4].lower() == 'the ':
        artist = artist[4:]
    if artist[0:2].lower() == 'a ':
        prefix = artist[2]
    else:
        prefix = artist[0]

    url = 'http://www.lyricsmode.com/lyrics/{}/{}/{}.html'
    url = url.format(prefix, artist, title)
    soup = get_url(url)
    content = soup.find(id='lyrics_text')

    return content.get_text().strip() | Returns the lyrics found in lyricsmode.com for the specified mp3 file or an
empty string if not found. |
def SendKeys(keys,
             pause=0.05,
             with_spaces=False,
             with_tabs=False,
             with_newlines=False,
             turn_off_numlock=True):
    """Parse the key-description string and type each resulting key,
    sleeping ``pause`` seconds between key events."""
    parsed = parse_keys(keys, with_spaces, with_tabs, with_newlines)
    for key in parsed:
        key.Run()
        time.sleep(pause)
def _detach_dim_scale(self, name):
    """Detach the dimension scale corresponding to a dimension name.

    Detaches the scale from every variable dimension in this group that
    matches *name*, then recurses into subgroups that do not shadow the
    dimension with their own definition.
    """
    for var in self.variables.values():
        for n, dim in enumerate(var.dimensions):
            if dim == name:
                var._h5ds.dims[n].detach_scale(self._all_h5groups[dim])
    for subgroup in self.groups.values():
        # BUG FIX: previously this tested the leaked loop variable ``dim``
        # (which is unbound when there are no variables, and otherwise
        # holds an arbitrary last dimension) instead of ``name``.
        if name not in subgroup._h5group:
            subgroup._detach_dim_scale(name)
def options(self):
    """Return the options specified as argument to this command.

    The :class:`Option.View` is created lazily on first access and
    cached for subsequent calls.
    """
    view = self._option_view
    if view is None:
        view = Option.View(self)
        self._option_view = view
    return view
def all_points_mutual_reachability(X, labels, cluster_id,
                                   metric='euclidean', d=None, **kwd_args):
    """
    Compute the all-points-mutual-reachability distances for all the points of
    a cluster.
    If metric is 'precomputed' then assume X is a distance matrix for the full
    dataset. Note that in this case you must pass in 'd' the dimension of the
    dataset.
    Parameters
    ----------
    X : array (n_samples, n_features) or (n_samples, n_samples)
        The input data of the clustering. This can be the data, or, if
        metric is set to `precomputed` the pairwise distance matrix used
        for the clustering.
    labels : array (n_samples)
        The label array output by the clustering, providing an integral
        cluster label to each data point, with -1 for noise points.
    cluster_id : integer
        The cluster label for which to compute the all-points
        mutual-reachability (which should be done on a cluster
        by cluster basis).
    metric : string
        The metric used to compute distances for the clustering (and
        to be re-used in computing distances for mr distance). If
        set to `precomputed` then X is assumed to be the precomputed
        distance matrix between samples.
    d : integer (or None)
        The number of features (dimension) of the dataset. This need only
        be set in the case of metric being set to `precomputed`, where
        the ambient dimension of the data is unknown to the function.
    **kwd_args :
        Extra arguments to pass to the distance computation for other
        metrics, such as minkowski, Mahalanobis etc.
    Returns
    -------
    mutual_reachability : array (n_samples, n_samples)
        The pairwise mutual reachability distances between all points in `X`
        with `label` equal to `cluster_id`.
    core_distances : array (n_samples,)
        The all-points-core_distance of all points in `X` with `label` equal
        to `cluster_id`.
    References
    ----------
    Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
    2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
    """
    if metric == 'precomputed':
        if d is None:
            raise ValueError('If metric is precomputed a '
                             'd value must be provided!')
        # Slice the full precomputed distance matrix down to the rows and
        # columns belonging to the requested cluster.
        distance_matrix = X[labels == cluster_id, :][:, labels == cluster_id]
    else:
        subset_X = X[labels == cluster_id, :]
        distance_matrix = pairwise_distances(subset_X, metric=metric,
                                             **kwd_args)
        # Ambient dimension is known directly from the data in this case.
        d = X.shape[1]
    # .copy() because all_points_core_distance presumably mutates its input
    # -- TODO confirm against its implementation.
    core_distances = all_points_core_distance(distance_matrix.copy(), d=d)
    core_dist_matrix = np.tile(core_distances, (core_distances.shape[0], 1))
    # Mutual reachability of (i, j) = max(dist(i, j), core(i), core(j)).
    result = np.dstack(
        [distance_matrix, core_dist_matrix, core_dist_matrix.T]).max(axis=-1)
    return result, core_distances
def get_node(self, index):
    """Return the Node stored at the given index.

    :param index: Index.
    :type index: QModelIndex
    :return: Node at *index*, or the source model's root node when the
        index is invalid or carries no internal pointer.
    :rtype: AbstractCompositeNode
    """
    source_index = self.mapToSource(index)
    if source_index.isValid():
        return source_index.internalPointer() or self.sourceModel().root_node
    return self.sourceModel().root_node
def __calculate_radius(self, number_neighbors, radius):
    """!
    @brief Calculate a new connectivity radius.
    @param[in] number_neighbors (uint): Average amount of neighbors that should be connected by the new radius.
    @param[in] radius (double): Current connectivity radius.
    @return New connectivity radius.
    """
    # When at least as many neighbors as oscillators are requested, just
    # grow the current radius by the configured percentage.
    if number_neighbors >= len(self._osc_loc):
        return radius + radius * self.__increase_persent
    return average_neighbor_distance(self._osc_loc, number_neighbors)
def prepare_boxlist(self, boxes, scores, image_shape):
    """Build a BoxList from per-proposal detections and attach scores.

    `boxes` has shape (#detections, 4 * #classes): each row holds one
    predicted box per object class (background included), all originating
    from the same proposal. `scores` has shape (#detections, #classes);
    ``scores[i, j]`` belongs to the box at ``boxes[i, j * 4:(j + 1) * 4]``.
    Both are flattened so every (proposal, class) pair becomes one entry.
    """
    flat_boxes = boxes.reshape(-1, 4)
    flat_scores = scores.reshape(-1)
    boxlist = BoxList(flat_boxes, image_shape, mode="xyxy")
    boxlist.add_field("scores", flat_scores)
    return boxlist
async def load_cache_for_proof(self, archive: bool = False) -> int:
    """
    Load schema, cred def, revocation caches; optionally archive enough to go
    offline and be able to generate proof on all credentials in wallet.
    Return timestamp (epoch seconds) of cache load event, also used as subdirectory
    for cache archives.
    :param archive: True to archive now or False to demur (subclasses may still
        need to augment archivable caches further)
    :return: cache load event timestamp (epoch seconds)
    """
    LOGGER.debug('HolderProver.load_cache_for_proof >>> archive: %s', archive)
    # Timestamp doubles as the archive subdirectory name.
    rv = int(time())
    box_ids = json.loads(await self.get_box_ids_held())
    # Warm each cache under its own lock so concurrent readers see a
    # consistent entry.
    for s_id in box_ids['schema_id']:
        with SCHEMA_CACHE.lock:
            await self.get_schema(s_id)
    for cd_id in box_ids['cred_def_id']:
        with CRED_DEF_CACHE.lock:
            await self.get_cred_def(cd_id)
    for rr_id in box_ids['rev_reg_id']:
        await self.get_rev_reg_def(rr_id)
        with REVO_CACHE.lock:
            revo_cache_entry = REVO_CACHE.get(rr_id, None)
            if revo_cache_entry:
                try:
                    # Refresh the registry delta up to the load timestamp.
                    await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)
                except ClosedPool:
                    # Best-effort: offline pool means the cached delta stays stale.
                    LOGGER.warning(
                        'HolderProver %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',
                        self.name,
                        self.pool.name,
                        rr_id,
                        rv)
                except AbsentPool:
                    LOGGER.warning(
                        'HolderProver %s has no pool, cannot update revo cache reg delta for %s to %s',
                        self.name,
                        rr_id,
                        rv)
    if archive:
        ArchivableCaches.archive(self.dir_cache)
    LOGGER.debug('HolderProver.load_cache_for_proof <<< %s', rv)
    return rv
def uri_query(self):
    """
    Get the Uri-Query of a request.
    :rtype : String
    :return: the Uri-Query options joined with '&'
    """
    parts = [
        str(option.value)
        for option in self.options
        if option.number == defines.OptionRegistry.URI_QUERY.number
    ]
    return "&".join(parts)
def _post(self, path, **kwargs):
    """POST ``kwargs`` as a JSON body to ``<path>.json`` and return the
    decoded JSON response as a dict.

    HTTP error responses are not raised; their body is read and decoded
    like a normal response.
    """
    # clean kwargs (filter None and empty string)
    clean_kwargs = clean_dict(kwargs)
    data = bytes(json.dumps(clean_kwargs), encoding='UTF-8')
    # change content type on post
    self._headers['Content-Type'] = 'application/json'
    api = self._api('%s.json' % path)
    req = request.Request(
        api, headers=self._headers, data=data, method='POST')
    try:
        # NOTE(review): data is passed both on the Request and to urlopen;
        # redundant but harmless -- urlopen's data overrides the Request's.
        resp = request.urlopen(req, data).read()
    except urllib.error.HTTPError as e:
        # On HTTP errors read the error body instead of propagating.
        resp = e.fp.read()
    # reset content type
    # NOTE(review): 'text/json' is not a standard media type -- presumably
    # the client's default elsewhere; confirm before changing.
    self._headers['Content-Type'] = 'text/json'
    return json.loads(resp.decode())
def __get_stpd_filename(self):
    ''' Choose the name for stepped data file.

    With caching enabled the name embeds an md5 over every input that
    affects ammo generation, so a changed config produces a new cache file.
    '''
    if self.use_caching:
        sep = "|"
        hasher = hashlib.md5()
        # NOTE(review): load_profile.schedule is folded into the hash twice
        # (here and two lines below) -- harmless for correctness, but
        # presumably unintentional.
        hashed_str = "cache version 6" + sep + \
            ';'.join(self.load_profile.schedule) + sep + str(self.loop_limit)
        hashed_str += sep + str(self.ammo_limit) + sep + ';'.join(
            self.load_profile.schedule) + sep + str(self.autocases)
        hashed_str += sep + ";".join(self.uris) + sep + ";".join(
            self.headers) + sep + self.http_ver + sep + ";".join(
            self.chosen_cases)
        hashed_str += sep + str(self.enum_ammo) + sep + str(self.ammo_type)
        if self.load_profile.is_instances():
            hashed_str += sep + str(self.instances)
        if self.ammo_file:
            opener = resource.get_opener(self.ammo_file)
            # Hash the ammo file contents via its opener, not just the path.
            hashed_str += sep + opener.hash
        else:
            if not self.uris:
                raise RuntimeError("Neither ammofile nor uris specified")
            hashed_str += sep + \
                ';'.join(self.uris) + sep + ';'.join(self.headers)
        self.log.debug("stpd-hash source: %s", hashed_str)
        hasher.update(hashed_str.encode('utf8'))
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        # NOTE(review): basename(self.ammo_file) is used even on the
        # uris-only path -- verify ammo_file defaults to '' (not None)
        # there, or this raises TypeError.
        stpd = self.cache_dir + '/' + \
            os.path.basename(self.ammo_file) + \
            "_" + hasher.hexdigest() + ".stpd"
    else:
        stpd = os.path.realpath("ammo.stpd")
    self.log.debug("Generated cache file name: %s", stpd)
    return stpd
def prepare_project(project, project_dir, binaries, ips, urls):
    """
    Generates blacklists / whitelists for *project* and runs the scan.

    Exits the process (sys.exit(1)) on missing VT_KEY or a bad
    vt_rate_type configuration when any of binaries/ips/urls is set.
    """
    # Get Various Lists / Project Waivers
    lists = get_lists.GetLists()
    # Get file name black list and project waivers
    file_audit_list, file_audit_project_list = lists.file_audit_list(project)
    # Get file content black list and project waivers
    flag_list, ignore_list = lists.file_content_list(project)
    # Get File Ignore Lists
    file_ignore = lists.file_ignore()
    ignore_directories = lists.ignore_directories(project)
    # Get URL Ignore Lists
    url_ignore = lists.url_ignore(project)
    # Get IP Ignore Lists
    ip_ignore = lists.ip_ignore(project)
    # Get Binary Ignore Lists
    hashlist = get_lists.GetLists()
    if binaries or ips or urls:
        try:
            apikey = os.environ["VT_KEY"]
        except KeyError:
            logger.error("Please set your virustotal.com API key as an environment variable")
            sys.exit(1)
        try:
            vt_rate_type = config.get('config', 'vt_rate_type')
        except six.moves.configparser.NoSectionError:
            logger.error("A config section is required for vt_rate_type with a public | private option")
            sys.exit(1)
        # NOTE(review): local 'patten' is a typo for 'pattern'.
        patten = re.compile(r'\bpublic\b|\bprivate\b')
        if not patten.match(vt_rate_type):
            logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type)
            sys.exit(1)
    # Perform rudimentary scans
    # NOTE(review): apikey is only bound inside the conditional above --
    # confirm scan_file is never reached with binaries/ips/urls all falsy,
    # or this raises NameError.
    scan_file(project, project_dir, binaries, ips, urls, file_audit_list,
              file_audit_project_list, flag_list, ignore_list, hashlist,
              file_ignore, ignore_directories, url_ignore, ip_ignore, apikey)
def generate_acyclic_graph(self):
    """Generate an acyclic graph for the given words.

    Sets ``self.n`` (table size), the graph/edge-word associations via
    ``generate_or_fail``, and ``self.trials_taken``. Raises RuntimeError
    when no acyclic graph is found within ``len(words) ** 2`` attempts.
    """
    # Table size n = c * m with c = 3 hardcoded. There might be a better
    # way to choose c, but [1] suggests the average number of iterations
    # needed to generate an acyclic graph at this size is sqrt(3).
    self.n = 3 * len(self.words)
    max_tries = len(self.words) ** 2
    attempts = 0
    while attempts < max_tries:
        attempts += 1
        try:
            self.generate_or_fail()
        except forest.InvariantError:
            continue
        # Generated successfully!
        self.trials_taken = attempts
        return
    raise RuntimeError("Could not generate graph in "
                       "{} tries".format(max_tries))
def await_reservations(self, sc, status=None, timeout=600):
    """Block until all reservations are received.

    :param sc: Spark context, used to cancel jobs and stop on error.
    :param status: optional dict of status flags; if it contains an
        ``'error'`` key the Spark jobs are cancelled and the process exits.
    :param timeout: maximum seconds to wait before raising an Exception.
    :return: the collected reservations.
    """
    # BUG FIX: the original used a mutable default argument (status={});
    # use None as the sentinel and create a fresh dict per call.
    if status is None:
        status = {}
    timespent = 0
    while not self.reservations.done():
        logging.info("waiting for {0} reservations".format(self.reservations.remaining()))
        # check status flags for any errors
        if 'error' in status:
            sc.cancelAllJobs()
            sc.stop()
            sys.exit(1)
        time.sleep(1)
        timespent += 1
        if (timespent > timeout):
            raise Exception("timed out waiting for reservations to complete")
    logging.info("all reservations completed")
    return self.reservations.get()
def priority_sort(list_, priority):
    r"""
    Args:
        list_ (list):
        priority (list): desired order of items
    Returns:
        list: reordered_list
    CommandLine:
        python -m utool.util_list --test-priority_argsort
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> list_ = [2, 4, 6, 8, 10]
        >>> priority = [8, 2, 6, 9]
        >>> reordered_list = priority_sort(list_, priority)
        >>> result = str(reordered_list)
        >>> print(result)
        [8, 2, 6, 4, 10]
    """
    # remove requested priority items not in the list
    priority_ = setintersect_ordered(priority, list_)
    # priority items first (in priority order), then the remaining items
    # in their original order; duplicates collapse to the first occurrence.
    reordered_list = unique_ordered(priority_ + list_)
    return reordered_list
def get_or_create(cls, subscriber, livemode=djstripe_settings.STRIPE_LIVE_MODE):
    """
    Get or create a dj-stripe customer.
    :param subscriber: The subscriber model instance for which to get or create a customer.
    :type subscriber: User
    :param livemode: Whether to get the subscriber in live or test mode.
    :type livemode: bool
    :return: ``(customer, created)`` tuple, mirroring Django's get_or_create.
    """
    # EAFP: try the lookup first and fall back to creation on DoesNotExist.
    try:
        return Customer.objects.get(subscriber=subscriber, livemode=livemode), False
    except Customer.DoesNotExist:
        # The idempotency key ties retried create calls for the same
        # subscriber/livemode to a single Stripe-side customer creation.
        action = "create:{}".format(subscriber.pk)
        idempotency_key = djstripe_settings.get_idempotency_key("customer", action, livemode)
        return cls.create(subscriber, idempotency_key=idempotency_key), True
def _outlierDetection_threaded(inputs):
    '''
    Detect whether one sample is an outlier against a GP model fitted on
    the remaining samples; returns a descriptive dict, or None when the
    sample is within the 98% confidence band.
    '''
    # inputs is packed as a single argument for use with a thread/process pool.
    [samples_idx, samples_x, samples_y_aggregation] = inputs
    sys.stderr.write("[%s] DEBUG: Evaluating %dth of %d samples\n"\
        % (os.path.basename(__file__), samples_idx + 1, len(samples_x)))
    outlier = None
    # Create a diagnostic regression model which removes the sample that we want to evaluate
    diagnostic_regressor_gp = gp_create_model.create_model(\
        samples_x[0:samples_idx] + samples_x[samples_idx + 1:],\
        samples_y_aggregation[0:samples_idx] + samples_y_aggregation[samples_idx + 1:])
    mu, sigma = gp_prediction.predict(samples_x[samples_idx], diagnostic_regressor_gp['model'])
    # 2.33 is the z-score for 98% confidence level
    if abs(samples_y_aggregation[samples_idx] - mu) > (2.33 * sigma):
        outlier = {"samples_idx": samples_idx,
                   "expected_mu": mu,
                   "expected_sigma": sigma,
                   "difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)}
    return outlier
def deploy_sandbox_shared_setup(log, verbose=True, app=None, exp_config=None):
    """Set up Git, push to Heroku, and launch the app.

    :param log: logging callable used for progress messages.
    :param verbose: when False, subprocess output is discarded to devnull.
    :param app: optional pre-existing app id.
    :param exp_config: optional experiment configuration overrides.
    :return: dict with ``app_name``, ``app_home`` and ``recruitment_msg``.
    """
    if verbose:
        out = None
    else:
        out = open(os.devnull, "w")
    config = get_config()
    if not config.ready:
        config.load()
    heroku.sanity_check(config)
    # NOTE(review): 'id' shadows the builtin; kept for interface stability.
    (id, tmp) = setup_experiment(log, debug=False, app=app, exp_config=exp_config)
    # Register the experiment using all configured registration services.
    if config.get("mode") == "live":
        log("Registering the experiment on configured services...")
        registration.register(id, snapshot=None)
    # Log in to Heroku if we aren't already.
    log("Making sure that you are logged in to Heroku.")
    heroku.log_in()
    config.set("heroku_auth_token", heroku.auth_token())
    log("", chevrons=False)
    # Change to temporary directory.
    cwd = os.getcwd()
    os.chdir(tmp)
    # Commit Heroku-specific files to tmp folder's git repo.
    git = GitClient(output=out)
    git.init()
    git.add("--all")
    git.commit('"Experiment {}"'.format(id))
    # Initialize the app on Heroku.
    log("Initializing app on Heroku...")
    team = config.get("heroku_team", None)
    heroku_app = HerokuApp(dallinger_uid=id, output=out, team=team)
    heroku_app.bootstrap()
    heroku_app.buildpack("https://github.com/stomita/heroku-buildpack-phantomjs")
    # Set up add-ons and AWS environment variables.
    database_size = config.get("database_size")
    redis_size = config.get("redis_size")
    addons = [
        "heroku-postgresql:{}".format(quote(database_size)),
        "heroku-redis:{}".format(quote(redis_size)),
        "papertrail",
    ]
    if config.get("sentry"):
        addons.append("sentry")
    for name in addons:
        heroku_app.addon(name)
    heroku_config = {
        "aws_access_key_id": config["aws_access_key_id"],
        "aws_secret_access_key": config["aws_secret_access_key"],
        "aws_region": config["aws_region"],
        "auto_recruit": config["auto_recruit"],
        "smtp_username": config["smtp_username"],
        "smtp_password": config["smtp_password"],
        "whimsical": config["whimsical"],
    }
    heroku_app.set_multiple(**heroku_config)
    # Wait for Redis database to be ready: poll with a trivial write until
    # the add-on accepts connections.
    log("Waiting for Redis...")
    ready = False
    while not ready:
        try:
            r = redis.from_url(heroku_app.redis_url)
            r.set("foo", "bar")
            ready = True
        except (ValueError, redis.exceptions.ConnectionError):
            time.sleep(2)
    log("Saving the URL of the postgres database...")
    # Set the notification URL and database URL in the config file.
    config.extend(
        {
            "notification_url": heroku_app.url + "/notifications",
            "database_url": heroku_app.db_url,
        }
    )
    config.write()
    git.add("config.txt")
    time.sleep(0.25)
    git.commit("Save URLs for database and notifications")
    time.sleep(0.25)
    # Launch the Heroku app.
    log("Pushing code to Heroku...")
    git.push(remote="heroku", branch="HEAD:master")
    log("Scaling up the dynos...")
    size = config.get("dyno_type")
    for process in ["web", "worker"]:
        qty = config.get("num_dynos_" + process)
        heroku_app.scale_up_dyno(process, qty, size)
    if config.get("clock_on"):
        heroku_app.scale_up_dyno("clock", 1, size)
    time.sleep(8)
    # Launch the experiment.
    log("Launching the experiment on the remote server and starting recruitment...")
    launch_data = _handle_launch_data("{}/launch".format(heroku_app.url), error=log)
    result = {
        "app_name": heroku_app.name,
        "app_home": heroku_app.url,
        "recruitment_msg": launch_data.get("recruitment_msg", None),
    }
    log("Experiment details:")
    log("App home: {}".format(result["app_home"]), chevrons=False)
    log("Recruiter info:")
    log(result["recruitment_msg"], chevrons=False)
    # Return to the branch whence we came.
    os.chdir(cwd)
    log("Completed deployment of experiment " + id + ".")
    return result
def mime_type(instance):
    """Ensure the 'mime_type' property of file objects comes from the Template
    column in the IANA media type registry.

    Yields a JSONError for every offending object; prefers the live IANA
    registry (via ``enums.media_types()``) and falls back to a structural
    regex when the registry is unavailable.
    """
    mime_pattern = re.compile(r'^(application|audio|font|image|message|model'
                              '|multipart|text|video)/[a-zA-Z0-9.+_-]+')
    for key, obj in instance['objects'].items():
        if ('type' in obj and obj['type'] == 'file' and 'mime_type' in obj):
            if enums.media_types():
                # Registry reachable: check against the authoritative list.
                if obj['mime_type'] not in enums.media_types():
                    yield JSONError("The 'mime_type' property of object '%s' "
                                    "('%s') should be an IANA registered MIME "
                                    "Type of the form 'type/subtype'."
                                    % (key, obj['mime_type']), instance['id'],
                                    'mime-type')
            else:
                # Registry unreachable: best-effort syntactic check only.
                info("Can't reach IANA website; using regex for mime types.")
                if not mime_pattern.match(obj['mime_type']):
                    yield JSONError("The 'mime_type' property of object '%s' "
                                    "('%s') should be an IANA MIME Type of the"
                                    " form 'type/subtype'."
                                    % (key, obj['mime_type']), instance['id'],
                                    'mime-type')
def _norm(self, x):
    """Return the norm of ``x``.

    This method is intended to be private. Public callers should
    resort to `norm`, which is type-checked.
    """
    inner_product = self.inner(x, x)
    return float(np.sqrt(inner_product.real))
def create_required_directories(self):
    """Create every directory Engineer requires, skipping ones that
    already exist."""
    for folder in (self.CACHE_DIR,
                   self.LOG_DIR,
                   self.OUTPUT_DIR,
                   self.ENGINEER.JINJA_CACHE_DIR):
        ensure_exists(folder, assume_dirs=True)
def delete(self, params, args, data):
    # type: (str, dict, dict) -> None
    """
    DELETE /resource/model_cls/[params]?[args]
    Delete a single row when the context resolves to one, otherwise
    delete the whole collection.
    """
    ctx = self._create_context(params, args, data)
    row_id = ctx.get_row_id()
    if row_id is None:
        return self._delete_collection(ctx)
    return self._delete_one(row_id, ctx)
def searchPhotos(self, title, **kwargs):
    """Search this section for a photo by title.

    See :func:`~plexapi.library.LibrarySection.search()` for full usage.
    """
    return self.search(title=title, libtype='photo', **kwargs)
def destroy(self):
    """Finish up a session by logging out of shell-backed session types."""
    # TODO: does this work/handle already being logged out/logged in deep OK?
    if self.session_type in ('bash', 'vagrant'):
        self.logout()
def get_variables(scope=None, suffix=None):
    """Gets the list of variables, filtered by scope and/or suffix.
    Args:
        scope: an optional scope for filtering the variables to return.
        suffix: an optional suffix for filtering the variables to return.
    Returns:
        a copied list of variables with scope and suffix.
    """
    variables = list(tf.get_collection(MODEL_VARIABLES, scope))
    if suffix is None:
        return variables
    return [var for var in variables if var.op.name.endswith(suffix)]
def get_kafka_ssl_context():
    """
    Returns an SSL context based on the certificate information in the Kafka
    config vars (KAFKA_CLIENT_CERT, KAFKA_CLIENT_CERT_KEY, KAFKA_TRUSTED_CERT).
    """
    # NOTE: We assume that Kafka environment variables are present. If using
    # Apache Kafka on Heroku, they will be available in your app configuration.
    #
    # 1. Write the PEM certificates necessary for connecting to the Kafka brokers to physical
    #    files. The broker connection SSL certs are passed in environment/config variables and
    #    the python and ssl libraries require them in physical files. The public keys are written
    #    to short lived NamedTemporaryFile files; the client key is encrypted before writing to
    #    the short lived NamedTemporaryFile
    #
    # 2. Create and return an SSLContext for connecting to the Kafka brokers referencing the
    #    PEM certificates written above
    #
    # stash the kafka certs in named temporary files for loading into SSLContext. Initialize the
    # SSLContext inside the with so when it goes out of scope the files are removed which has them
    # existing for the shortest amount of time. As extra caution password
    # protect/encrypt the client key
    with NamedTemporaryFile(suffix='.crt') as cert_file, \
            NamedTemporaryFile(suffix='.key') as key_file, \
            NamedTemporaryFile(suffix='.crt') as trust_file:
        cert_file.write(os.environ['KAFKA_CLIENT_CERT'].encode('utf-8'))
        cert_file.flush()
        # setup cryptography to password encrypt/protect the client key so it's not in the clear on
        # the filesystem. Use the generated password in the call to load_cert_chain
        passwd = standard_b64encode(os.urandom(33))
        private_key = serialization.load_pem_private_key(
            os.environ['KAFKA_CLIENT_CERT_KEY'].encode('utf-8'),
            password=None,
            backend=default_backend()
        )
        pem = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.BestAvailableEncryption(passwd)
        )
        key_file.write(pem)
        key_file.flush()
        trust_file.write(os.environ['KAFKA_TRUSTED_CERT'].encode('utf-8'))
        trust_file.flush()
        # create an SSLContext for passing into the kafka provider using the create_default_context
        # function which creates an SSLContext with protocol set to PROTOCOL_SSLv23, OP_NO_SSLv2,
        # and OP_NO_SSLv3 when purpose=SERVER_AUTH.
        ssl_context = ssl.create_default_context(
            purpose=ssl.Purpose.SERVER_AUTH, cafile=trust_file.name)
        ssl_context.load_cert_chain(cert_file.name, keyfile=key_file.name, password=passwd)
        # Intentionally disabling hostname checking. The Kafka cluster runs in the cloud and Apache
        # Kafka on Heroku doesn't currently provide stable hostnames. We're pinned to a specific certificate
        # for this connection even though the certificate doesn't include host information. We rely
        # on the ca trust_cert for this purpose.
        ssl_context.check_hostname = False
        return ssl_context
def url(self):
    """Return the path subcomponent of the url to this object,
    e.g. "/computers/id/451", or None when the object has no id.
    """
    if not self.id:
        return None
    return "{}{}{}".format(self._url, self.id_url, self.id)
def grab_focus(self):
    """Grab the window's focus for this sprite.

    Keyboard and scroll events are forwarded to the sprite holding
    focus. Check the sprite's 'focused' property in the on-render event
    to decide how to render it (say, add an outline when focused=true).
    """
    scene = self.get_scene()
    if not scene:
        return
    if scene._focus_sprite != self:
        scene._focus_sprite = self
def write_template(data, results_dir, parent):
    """Write the html template
    :param dict data: the dict containing all data for output
    :param str results_dir: the output directory for results
    :param str parent: the parent directory
    """
    print("Generating html report...")
    partial = time.time()
    # Templates live under <results_dir>/<parent>/templates.
    j_env = Environment(loader=FileSystemLoader(os.path.join(results_dir, parent, 'templates')))
    template = j_env.get_template('report.html')
    report_writer = ReportWriter(results_dir, parent)
    report_writer.write_report(template.render(data))
    print("HTML report generated in {} seconds\n".format(time.time() - partial))
def expected(self, dynamizer):
    """Get the expected values for the update.

    Explicit kwargs take precedence; otherwise a single-key constraint is
    built from ``self._expected`` (Exists=False for null sentinels).
    """
    if self._expect_kwargs:
        return encode_query_kwargs(dynamizer, self._expect_kwargs)
    if self._expected is NO_ARG:
        return {}
    if is_null(self._expected):
        entry = {'Exists': False}
    else:
        entry = {
            'Value': dynamizer.encode(self._expected),
            'Exists': True,
        }
    return {self.key: entry}
def summary(args):
    """
    %prog summary *.fasta
    Report real bases and N's in fastafiles in a tabular report
    """
    from jcvi.utils.natsort import natsort_key
    p = OptionParser(summary.__doc__)
    p.add_option("--suffix", default="Mb",
                 help="make the base pair counts human readable [default: %default]")
    p.add_option("--ids",
                 help="write the ids that have >= 50% N's [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) == 0:
        sys.exit(not p.print_help())
    idsfile = opts.ids
    header = "Seqid Real N's Total %_real".split()
    if idsfile:
        idsfile = open(idsfile, "w")
        nids = 0
    data = []
    for fastafile in args:
        for rec in SeqIO.parse(fastafile, "fasta"):
            seqlen = len(rec)
            # Count both cases of the ambiguity character.
            nns = rec.seq.count('n') + rec.seq.count('N')
            reals = seqlen - nns
            pct = reals * 100. / seqlen
            pctreal = "{0:.1f}%".format(pct)
            # Record mostly-N sequences when an ids file was requested.
            if idsfile and pct < 50:
                nids += 1
                print(rec.id, file=idsfile)
            data.append((rec.id, reals, nns, seqlen, pctreal))
    data.sort(key=natsort_key)
    ids, reals, nns, seqlen, pctreal = zip(*data)
    # Append a grand-total row after summing the per-sequence columns.
    reals = sum(reals)
    nns = sum(nns)
    seqlen = sum(seqlen)
    pctreal = "{0:.1f}%".format(reals * 100. / seqlen)
    data.append(("Total", reals, nns, seqlen, pctreal))
    write_csv(header, data, sep=" ", filename=opts.outfile, thousands=True)
    if idsfile:
        logging.debug("A total of {0} ids >= 50% N's written to {1}.".\
            format(nids, idsfile.name))
        idsfile.close()
    return reals, nns, seqlen
def _process_params(self, params):
    """Convert Unicode/lists/booleans inside HTTP parameters, returning a
    new dict with every value passed through ``_process_param_value``."""
    return {
        key: self._process_param_value(value)
        for key, value in params.items()
    }
def percentiles(self, percentiles):
    """Given a list of percentiles (floats between 0 and 1), return a
    list of the values at those percentiles, interpolating if
    necessary.

    Uses the pos = p * (n + 1) rank convention; values outside the
    sample range clamp to the min/max sample.
    """
    try:
        scores = [0.0]*len(percentiles)
        if self.count > 0:
            values = self.samples()
            values.sort()
            for i in range(len(percentiles)):
                p = percentiles[i]
                pos = p * (len(values) + 1)
                if pos < 1:
                    scores[i] = values[0]
                # BUG FIX: was 'pos > len(values)', which let
                # pos == len(values) fall through and index past the end
                # (returning NaN via the IndexError handler).
                elif pos >= len(values):
                    scores[i] = values[-1]
                else:
                    # BUG FIX: lower/upper were swapped, inverting the
                    # interpolation direction between the two neighbors.
                    lower, upper = values[int(pos - 1)], values[int(pos)]
                    scores[i] = lower + (pos - floor(pos)) * (upper - lower)
        return scores
    except IndexError:
        return [float('NaN')] * len(percentiles)
def pluck(self, key):
    """
    Convenience version of a common use case of
    `map`: fetching a property.

    Looks *key* up with ``.get`` on every element of ``self.obj``
    (missing keys become None) and hands the list to ``self._wrap``.
    """
    plucked = []
    for record in self.obj:
        plucked.append(record.get(key))
    return self._wrap(plucked)
`map`: fetching a property. |
def console_script():
    '''
    The main entry point for the production
    administration script 'opensubmit-exec',
    installed by setuptools.

    Dispatches on the first command-line argument. NOTE: commands are
    matched by substring ("cmd" in sys.argv[1]), so the order of the
    checks below matters. Returns a process exit code: 0 on success,
    1 on failure.
    '''
    # No sub-command given: print one-line usage and exit successfully.
    if len(sys.argv) == 1:
        print("opensubmit-exec [configcreate <server_url>|configtest|run|test <dir>|unlock|help] [-c config_file]")
        return 0
    if "help" in sys.argv[1]:
        print("configcreate <server_url>: Create initial config file for the OpenSubmit executor.")
        print("configtest: Check config file for correct installation of the OpenSubmit executor.")
        print("run: Fetch and run code to be tested from the OpenSubmit web server. Suitable for crontab.")
        print("test <dir>: Run test script from a local folder for testing purposes.")
        print("unlock: Break the script lock, because of crashed script.")
        print("help: Print this help")
        print(
            "-c config_file Configuration file to be used (default: {0})".format(CONFIG_FILE_DEFAULT))
        return 0
    # Translate legacy commands
    # ("configure" was the old name for what is now "configtest").
    if sys.argv[1] == "configure":
        sys.argv[1] = 'configtest'
    # Resolve the config-file path, honouring an optional "-c <file>" flag.
    config_fname = get_config_fname(sys.argv)
    if "configcreate" in sys.argv[1]:
        # Write a fresh config pointing at the given server URL.
        print("Creating config file at " + config_fname)
        server_url = sys.argv[2]
        if create_config(config_fname, override_url=server_url):
            print("Config file created, fetching jobs from " + server_url)
            return 0
        else:
            return 1
    if "configtest" in sys.argv[1]:
        # Validate an existing config, then register this host with the server.
        print("Testing config file at " + config_fname)
        if has_config(config_fname):
            config = read_config(config_fname)
            if not check_config(config):
                return 1
        else:
            print("ERROR: Seems like the config file %s does not exist. Call 'opensubmit-exec configcreate <server_url>' first." %
                  config_fname)
            return 1
        print("Sending host information update to server ...")
        send_hostinfo(config)
        return 0
    if "unlock" in sys.argv[1]:
        # Remove a stale lock left behind by a crashed "run" invocation.
        config = read_config(config_fname)
        break_lock(config)
        return 0
    if "run" in sys.argv[1]:
        config = read_config(config_fname)
        # Perform additional precautions for unattended mode in cron
        kill_longrunning(config)
        with ScriptLock(config):
            download_and_run(config)
        return 0
    if "test" in sys.argv[1]:
        # Run the job from a local directory instead of downloading it.
        config = read_config(config_fname)
        copy_and_run(config, sys.argv[2])
        return 0
administration script 'opensubmit-exec',
installed by setuptools. |
def registerTzinfo(obj, tzinfo):
    """
    Register tzinfo if it's not already registered, return its tzid.
    """
    tzid = obj.pickTzid(tzinfo)
    if tzid:
        # Only register when the id is not already in the registry;
        # a falsy tzid skips the lookup entirely.
        if not getTzid(tzid, False):
            registerTzid(tzid, tzinfo)
    return tzid
def diff(s1, s2):
    """
    Return a normalised Levenshtein distance between two strings.
    Distance is normalised by dividing the Levenshtein distance of the
    two strings by the max(len(s1), len(s2)).
    Examples:
    >>> text.diff("foo", "foo")
    0
    >>> text.diff("foo", "fooo")
    1
    >>> text.diff("foo", "")
    1
    >>> text.diff("1234", "1 34")
    1
    >>> text.diff("", "")
    0
    Arguments:
    s1 (str): Argument A.
    s2 (str): Argument B.
    Returns:
    float: Normalised distance between the two strings.
    """
    longest = max(len(s1), len(s2))
    if longest == 0:
        # Two empty strings are identical; without this guard the
        # normalisation below would divide by zero.
        return 0
    return levenshtein(s1, s2) / longest
Distance is normalised by dividing the Levenshtein distance of the
two strings by the max(len(s1), len(s2)).
Examples:
>>> text.diff("foo", "foo")
0
>>> text.diff("foo", "fooo")
1
>>> text.diff("foo", "")
1
>>> text.diff("1234", "1 34")
1
Arguments:
s1 (str): Argument A.
s2 (str): Argument B.
Returns:
float: Normalised distance between the two strings. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.