content
stringlengths 42
6.51k
|
|---|
def increase_config(config, amount=1):
    """Build a new configuration with each truthy parameter bumped by *amount*.

    Entries whose parameter is falsy (e.g. 0 or None) are kept unchanged.
    Returns the result as a tuple of (ident, param) pairs.
    """
    return tuple(
        (ident, param + amount) if param else (ident, param)
        for ident, param in config
    )
|
def intify(obj):
    """Recursively replace integral floats with ints.

    Walks a composition of primitives, lists and dictionaries and converts
    every float that holds an integral value (e.g. ``2.0``) into an ``int``.

    Args:
        obj: the structure to convert.
    Returns:
        A new structure with integral floats replaced by ints.
    """
    if isinstance(obj, dict):
        return {key: intify(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [intify(element) for element in obj]
    if isinstance(obj, float) and obj.is_integer():
        return int(obj)
    return obj
|
def parse_textarea_ids_or_names(textarea_contents):
    """
    Prepare textarea text for format_records().

    The input arrives as unicode text (flask builds it that way); the rest
    of the code expects plain bytes, so the text is UTF-8 encoded before
    being split into lines.

    Parameters
        textarea_contents : the text pasted into the textarea.
    Returns
        list of byte strings, one per line of the input.
    """
    encoded = textarea_contents.encode('utf-8')
    return encoded.splitlines()
|
def hex_fig(n, uppercase=True):
    """
    Return the single hexadecimal digit representing `n`.

    :param n: 0 <= int < 16
    :param uppercase: bool
    :return: str (one character from 0123456789ABCDEF or 0123456789abcdef)
    """
    assert isinstance(n, int), type(n)
    assert 0 <= n < 16
    assert isinstance(uppercase, bool), type(uppercase)
    digits = '0123456789ABCDEF' if uppercase else '0123456789abcdef'
    return digits[n]
|
def verify_spacer(reads, umis, spacer_seq):
    """Return True unless some read contains a UMI not followed by the spacer."""
    for read in reads:
        for umi in umis:
            if umi not in read:
                continue
            # Text right after the first UMI occurrence must begin with the spacer.
            remainder = read.split(umi)[1]
            if not remainder.startswith(spacer_seq):
                return False
    return True
|
def _get_category(dataset):
"""Get the category from available layer information"""
category = [k for k in dataset['keywords']
if k.startswith('category:')]
if not category:
return "Other"
else:
return category[0].split(":")[1]
|
def get_type(a):
    """
    Return a readable type name for ``a``.

    Prefers ``a.__class__.__name__`` (e.g. ``'list'``, ``'str'``, or the
    class name of a user-defined instance), which is usually more readable
    than ``type(a)``.  Falls back to ``str(type(a))`` when the object has no
    ``__class__`` attribute.  Class objects are given an explanatory suffix,
    e.g. ``'type (i.e. a class object)'``.
    """
    try:
        # try to get a's class name (if possible)
        tp = a.__class__.__name__
        if tp == 'type':  # new style class object?
            # add some explanation (can get output "a is of type type")
            tp += ' (i.e. a class object)'
    # Only catch the missing-attribute case; the original bare `except:`
    # would also have hidden unrelated errors (e.g. KeyboardInterrupt).
    except AttributeError:
        # rely on the type (would be less informative if a is instance)
        tp = str(type(a))
    # if a is a Python 2 classic class object, tp is "<type 'classobj'>"
    # which we translate into something more readable:
    if tp == "<type 'classobj'>":
        tp = "classobj (i.e. a class object)"
    return tp
|
def group_by_first(pairs):
    """Return a list of pairs that relates each unique key in [key, value]
    pairs to a list of all values that appear paired with that key.

    Arguments:
    pairs -- a sequence of pairs

    >>> example = [ [1, 2], [3, 2], [2, 4], [1, 3], [3, 1], [1, 2] ]
    >>> group_by_first(example)
    [[2, 3, 2], [2, 1], [4]]
    """
    # Single pass with an insertion-ordered dict: the original rescanned
    # `pairs` once per distinct key (O(n*k)); this is O(n) with identical
    # output order (keys in first-appearance order, values in pair order).
    groups = {}
    for key, value in pairs:
        groups.setdefault(key, []).append(value)
    return list(groups.values())
|
def gen_mlsag_assert(pk, xx, kLRki, mscout, index, dsRows):
    """
    Validate the arguments for gen_mlsag_ext.

    :param pk: rectangular matrix of public keys (cols x rows)
    :param xx: secret keys, one per row
    :param kLRki: multisig data (must be paired with mscout)
    :param mscout: multisig output (must be paired with kLRki)
    :param index: signing index, < number of columns
    :param dsRows: number of double-spend rows
    :return: (rows, cols)
    :raises ValueError: on any malformed input
    """
    cols = len(pk)
    if cols <= 1:
        raise ValueError("Cols == 1")
    if index >= cols:
        raise ValueError("Index out of range")
    rows = len(pk[0])
    if rows == 0:
        raise ValueError("Empty pk")
    if any(len(column) != rows for column in pk):
        raise ValueError("pk is not rectangular")
    if len(xx) != rows:
        raise ValueError("Bad xx size")
    if dsRows > rows:
        raise ValueError("Bad dsRows size")
    # kLRki and mscout must be supplied together or not at all.
    if bool(kLRki) != bool(mscout):
        raise ValueError("Only one of kLRki/mscout is present")
    if kLRki and dsRows != 1:
        raise ValueError("Multisig requires exactly 1 dsRows")
    return rows, cols
|
def get_message(line, emote):
    """
    Returns the message.
    Args:
        line (binary): Line which we parse.
        emote (bool): Flag to determine if there is a emote in the line.
    Returns:
        str: the message portion of the line, or '' when the line does not
        contain enough ':'-separated fields.
    """
    _line = str(line)
    # `i` and `j` are indexes for where we can fetch the username. If an
    # emote is involved it adds a bunch of flags which messes up the original
    # parsing. Definitely a hacky implementation but it is fine for now.
    i, j = 2, 2
    if emote:
        i = i + 1
        j = j + 1
    # Split on at most `j` colons so the message body (which may itself
    # contain colons) stays in one piece.
    s = _line.split(':', j)
    try:
        # Strip the trailing carriage-return artifact left by str(bytes).
        return s[i].replace("\\r'", '')
    except IndexError:
        # Not enough fields: treat as an empty message.
        return ''
|
def d_get_delta_time(timebase_scale=1.0, i_ns=120):
    """Compute the per-sample time delta from the DS1054Z timebase scale.

    The full screen spans 12 divisions of `timebase_scale` seconds, divided
    over `i_ns` samples.
    """
    screen_time = 12.0 * float(timebase_scale)
    return screen_time / float(i_ns)
|
def fix_name(team_name):
    """Expand abbreviated team names from the values in JSON, title-cased."""
    expansions = (
        ('wings', 'Red Wings'),
        ('jackets', 'Blue Jackets'),
        ('leafs', 'Maple Leafs'),
        ('knights', 'Golden Knights'),
    )
    for fragment, full_name in expansions:
        if fragment in team_name:
            team_name = full_name
            break
    return team_name.title()
|
def astype(value, types=None):
    """Return argument as one of types if possible.

    Quoted string values are returned with the surrounding quotes stripped.
    Otherwise each type in *types* (default: int, float, str) is tried in
    order and the first successful conversion is returned; the original
    value is returned when none succeed.
    """
    # Guard against empty input: the original indexed value[0]
    # unconditionally and raised IndexError on ''.
    if value and value[0] in '\'"':
        return value[1:-1]
    if types is None:
        types = int, float, str
    for typ in types:
        try:
            return typ(value)
        except (ValueError, TypeError, UnicodeEncodeError):
            pass
    return value
|
def selection_sort(items):
    """
    Sort *items* in place using selection sort; returns the same list.
    """
    length = len(items)
    for fill in range(length):
        # Locate the smallest remaining element ...
        smallest = fill
        for candidate in range(fill + 1, length):
            if items[candidate] < items[smallest]:
                smallest = candidate
        # ... and swap it into position.
        items[fill], items[smallest] = items[smallest], items[fill]
    return items
|
def viewkeys(obj, **kwargs):
    """
    Iterate dictionary keys with the same set-like behaviour on Py2.7
    as on Py3 by preferring ``viewkeys`` when the object provides it.
    Passes kwargs to the underlying method.
    """
    method = getattr(obj, "viewkeys", None)
    if method is None:
        method = obj.keys
    return method(**kwargs)
|
def get_absolute_import_name(dir_path: str, import_name: str) -> str:
    """Join a relative import path and an import name with a colon."""
    return ':'.join((dir_path, import_name))
|
def autocomplete(term, search_terms) -> str:
    """
    Validate a (possibly partial) search term against *search_terms* and
    return the unique full name, e.g. autocomplete('subj') == 'subject'.

    Raises ValueError when no term matches or the prefix is ambiguous.
    """
    term = term.lower()
    # Exact matches short-circuit the prefix search.
    if term in search_terms:
        return term
    candidates = (name for name in search_terms if name.lower().startswith(term))
    first = next(candidates, None)
    if not first:
        raise ValueError(f'Invalid search term "{term}", see `one.search_terms()`')
    elif next(candidates, None):
        raise ValueError(f'Ambiguous search term "{term}"')
    return first
|
def rgb_int_to_float(color):
    """
    Convert an integer color from the 0-255 range into the 0-1 float range.

    :param color: tuple(int, ...), channels in 0-255 range
    :return: tuple(float, ...), channels in 0-1 range
    """
    return tuple(channel / 255.0 for channel in color)
|
def dms_to_decimal(degrees, minutes, seconds, sign=' '):
    """Convert degrees, minutes, seconds into decimal degrees.

    A sign whose first character is S/W (either case) negates the result.

    >>> dms_to_decimal(10, 10, 10)
    10.169444444444444
    >>> dms_to_decimal(8, 9, 10, 'S')
    -8.152777777777779
    """
    magnitude = float(degrees) + float(minutes) / 60 + float(seconds) / 3600
    negative = sign[0] in 'SWsw'
    return -magnitude if negative else magnitude
|
def remove_nones(X):
    """Replace every 'None' string by 0. in a 3-level nested list.

    NOTE: despite the name, this replaces the *string* ``'None'`` (not the
    ``None`` singleton).  The original used ``v is 'None'`` — an identity
    comparison with a literal that only worked through CPython string
    interning and raises SyntaxWarning on modern Pythons; use equality.
    """
    return [[[0. if v == 'None' else v for v in d] for d in data] for data in X]
|
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for comment line like:
    # distutils: language = c++
    Returns True if such a line is present in the file, else False.
    """
    # Use a context manager so the file handle is always closed; the
    # original leaked the handle returned by open().
    with open(path, 'rt') as source:
        for line in source:
            if line.startswith('#') and '=' in line:
                parts = line.split('=')
                # Only simple "key = value" comment lines are considered.
                if len(parts) != 2:
                    continue
                lhs, rhs = parts
                if (lhs.strip().split()[-1].lower() == 'language'
                        and rhs.strip().split()[0].lower() == 'c++'):
                    return True
    return False
|
def leaf_name_to_bacterial_phylum(leaf_name: str) -> str:
    """
    Extract the phylum from an annotated leaf name, e.g.
    'ASV_0053; d__Bacteria; p__Bacteroidetes; c__Bacteroidia' -> 'Bacteroidetes'.
    Returns 'Unassigned' when the bacterial domain or phylum tag is missing.
    """
    if '__Bacteria;' not in leaf_name or '; p__' not in leaf_name:
        return 'Unassigned'
    after_tag = leaf_name.split('; p__')[1]
    return after_tag.split(';')[0]
|
def build_etrans_dct(spc_dct_i):
    """Build an energy-transfer dict from a species dict.

    Copies the 'mass', 'lj' and 'edown' entries, skipping any that are
    missing or None.
    """
    etrans_dct = {}
    for field in ('mass', 'lj', 'edown'):
        value = spc_dct_i.get(field, None)
        if value is not None:
            etrans_dct[field] = value
    return etrans_dct
|
def uri_sort_key(uri):
    """Sort key for a URI: primary works first, then by the URI itself."""
    primary_prefix = 'http://urn.fi/URN:NBN:fi:bib:me:'
    if uri.startswith(primary_prefix):
        # last two digits are 00 for the primary work, 01+ for other works mentioned
        priority = int(uri[-2:])
    else:
        # higher priority for e.g. authorized agents
        priority = -1
    return (priority, uri)
|
def test_no_conflict(N):
"""A 1-SAT problem that requires N variables to all be true"""
return [[i+1] for i in range(N)]
|
def _is_barcode_gc_compatible(barcode, min_gc=0.20, max_gc=0.80):
"""check GC content
"""
gc_count = barcode.count("G") + barcode.count("C")
gc_fract = gc_count / float(len(barcode))
if gc_fract < min_gc:
return False
if gc_fract > max_gc:
return False
return True
|
def get_publication_year(raw):
    """
    Extract publication year.
    @param raw: json object of a Libris edition
    @type raw: dictionary
    @return: year of the first PrimaryPublication entry, or None
    """
    # "publication" may be absent, in which case .get() returns None;
    # default to an empty list so the comprehension below does not raise
    # TypeError (the original crashed here).
    publication = raw["mainEntity"].get("publication") or []
    primary = [x for x in publication if
               x["@type"] == "PrimaryPublication"]
    if primary:
        return primary[0]["year"]
    return None
|
def Luhn(number):
    """Validate *number* against the Luhn checksum formula.

    - number: an integer (digits only)
    - returns: True if the checksum holds, else False
    source: https://en.wikipedia.org/wiki/Luhn_algorithm
    """
    total = 0
    # Walk the digits right-to-left; every second digit is doubled.
    for position, digit_char in enumerate(reversed(str(number)), start=1):
        digit = int(digit_char)
        if position % 2 == 0:
            doubled = 2 * digit
            # Doubling past 9 folds back by subtracting 9 (digit-sum rule).
            total += doubled - 9 if doubled >= 10 else doubled
        else:
            total += digit
    return total % 10 == 0
|
def _combine(**kw):
"""
Combine two table dictionaries used in a join to produce a single dictionary
that can be used in formatting.
"""
result = {}
for tableRole, tableDictionary in kw.items():
result.update([("%s:%s" % (tableRole, k), v)
for k, v in tableDictionary.items()])
return result
|
def pivot_index(row):
    """
    Return the index of the first non-zero element (the pivot) in *row*,
    or the row length when every element is zero.
    """
    position = 0
    for element in row:
        if element != 0.0:
            return position
        position += 1
    return position
|
def capitalize(s):
    """
    Upper-case only the first letter of a string.
    Args:
        s (string): The input string to be capitalized.
    Returns:
        (string): The capitalized string (empty input returned unchanged).
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
|
def abbr_fqdn(origin: str, name: str, *, prefix: str = '') -> str:
    """Abbreviate fully-qualified Python name, by removing origin.
    ``app.origin`` is the package where the app is defined,
    so if this is ``examples.simple``::
        >>> app.origin
        'examples.simple'
        >>> abbr_fqdn(app.origin, 'examples.simple.Withdrawal', prefix='[...]')
        '[...]Withdrawal'
        >>> abbr_fqdn(app.origin, 'examples.other.Foo', prefix='[...]')
        'examples.other.Foo'
    :func:`shorten_fqdn` is similar, but will always shorten a too long name,
    abbr_fqdn will only remove the origin portion of the name.
    """
    # Match on the full package boundary: a plain startswith(origin) would
    # also truncate e.g. 'examples.simplex.Foo' for origin 'examples.simple'.
    if name.startswith(origin + '.'):
        name = name[len(origin) + 1:]
        return f'{prefix}{name}'
    return name
|
def unitify(quantity, unit):
    """Format an amount with its unit, pluralising with 's' unless the amount is 1."""
    suffix = unit if quantity == 1 else unit + 's'
    return '{} {}'.format(quantity, suffix)
|
def divmod_min(a, b):
    """
    Return ``(q, r)`` such that ``a = q*b + r``, with minimum ``|r|``.

    Unlike builtin :func:`divmod`, the remainder here may have the opposite
    sign of ``b`` when that makes its magnitude smaller.

    :param a: dividend
    :param b: non-zero divisor
    :return: tuple ``(q, r)``
    """
    q, r = divmod(a, b)
    # we will want to adjust r if
    # (|r| > |b/2|), which is equivalent to checking
    # (|2r| > |b|),
    # (|r| > |b| - |r|)
    # then using the fact that for python,
    # divmod will give |r| < |b| and r,b will have the same sign
    # (|r| > |b - r|)
    diff = b - r
    if abs(r) > abs(diff):
        # Step the quotient once more; the remainder becomes -diff,
        # which is smaller in magnitude than r.
        q = q + 1
        r = -diff
    return q,r
|
def is_false(v, chk_none: bool = True) -> bool:
    """
    Check whether a bool/str/int value is some form of ``False``:

    * **bool**: ``False``
    * **str**: ``'false'``, ``'no'``, ``'n'``, ``'0'`` (case-insensitive)
    * **int**: ``0``

    With ``chk_none=True`` (default), ``None``, ``'null'``, ``'none'`` and
    the empty string are also considered Falsey.

    **Warning:** unless you specifically need Falseyness, it is usually
    safer to check truth with :py:func:`.is_true` and invert the result.

    Usage:
        >>> is_false(0)
        True
        >>> is_false('yes')
        False

    :param Any v: The value to check for falseyness
    :param bool chk_none: If ``True``, treat ``None``/``'none'``/``'null'`` as Falsey (default ``True``)
    :return bool is_False: ``True`` if the value appears to be falsey, otherwise ``False``.
    """
    # Strings are compared case-insensitively; exact str type matches the
    # original behaviour (subclasses are not lowered).
    needle = v.lower() if type(v) is str else v
    falsey = [False, 'false', 'no', 'n', '0', 0]
    if chk_none:
        falsey += [None, 'none', 'null', '']
    return needle in falsey
|
def clean_up_command_output(raw_command_data):
    """Restructure registered command output into a flat list of records.

    Each record carries the command string, its (first line of) stdout or,
    on failure, the error message, and the failed flag.  Returns an empty
    list when the raw data holds no results.
    """
    vrf_command_output = []
    for result in raw_command_data.get('results', []):
        # Prefer stdout; fall back to the failure message; else empty.
        if 'stdout' in result:
            output = result['stdout'][0]
        elif 'msg' in result:
            output = result['msg']
        else:
            output = ''
        vrf_command_output.append({
            'command': result['item'],
            'output': output,
            'failed': result['failed'],
        })
    return vrf_command_output
|
def _get_mod97_value(to_check, characters=None, mod=97):
"""Method that calculates a check digit based on the formula used for the mod97
check digit method. This method replaces all non-numeric values in the string with
two digits (i.e. A=10, B=11, etc) and then calculates a modulus of this value.
"""
digits = ""
for character in to_check:
if characters and character in characters:
digits += characters
elif character.isdigit():
digits += character
elif character.isalpha() and character.isupper():
digits += str(ord(character) - ord('A') + 10)
else:
raise ValueError("not a valid character: %s" % character)
return int(digits) % mod
|
def convert_standard_datestamp(entry):
    """Populate date/time attributes of *entry* from its standard date stamp."""
    month_numbers = {'Jan': '01', 'Feb': '02', 'Mar': '03',
                     'Apr': '04', 'May': '05', 'Jun': '06',
                     'Jul': '07', 'Aug': '08', 'Sep': '09',
                     'Oct': '10', 'Nov': '11', 'Dec': '12'}
    fields = entry['date_stamp'].split(' ')
    # A double space before single-digit days produces one empty field.
    if '' in fields:
        fields.remove('')
    month = month_numbers[fields[0].strip()]
    day = fields[1].strip().zfill(2)
    clock = fields[2].replace(':', '')
    # The stamp carries no year or timezone information.
    entry['year'] = None
    entry['month'] = month
    entry['day'] = day
    entry['tstamp'] = clock
    entry['tzone'] = None
    return entry
|
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
|
def rel_index(elem, lst, default=None):
    """``lst.index(elem) / len(lst)`` with fallback.

    When *elem* is absent, returns *default* if one was supplied, otherwise
    re-raises the ValueError from ``list.index``.
    """
    try:
        return lst.index(elem) / len(lst)
    except ValueError:
        # Identity check (PEP 8 E711): the original `default == None` could
        # misfire for objects whose __eq__ compares equal to None.
        if default is None:
            raise
        return default
|
def invalid_request_body_with_non_existent_client(valid_staff_model):
    """
    A fixture for creating a request body with non-existent client id.
    Args:
        valid_staff_model (Model): a valid staff model created by a fixture.
    Returns:
        dict: a feature-request payload whose `client_id` is expected not to
        exist in the database (the staff fixture guarantees `staff_id` 1).
    """
    return {
        'title': 'Improve customer care services',
        'description':
        'The current customer care services are reported to '
        'be abysmal with representatives dropping calls on customer or '
        'being rather unpleasant.',
        'product_area': 'POLICIES',
        'target_date': '2019-10-05T00:00:00Z',
        'priority': 1,
        'staff_id': 1,
        'client_id': 1,
    }
|
def TBool(val):
    """Checks if the given value is a boolean.
    """
    # True and False are the only instances of bool (which is final),
    # so an identity test is equivalent to isinstance(val, bool).
    return val is True or val is False
|
def clean_string(s):
    """
    Get a string into a canonical form - no whitespace at either end,
    no newlines, no double-spaces.
    """
    s = s.strip().replace("\n", " ")
    # A single replace pass leaves "a   b" with a double space; repeat
    # until every run of spaces has collapsed to one (the original did
    # only one pass, contradicting its own contract).
    while "  " in s:
        s = s.replace("  ", " ")
    return s
|
def parse_shader_error(error):
    """Parse a single GLSL error line into (line_number, description).

    GLSL errors are not standardised; each driver prints its own format:
    Nvidia::
        0(7): error C1008: undefined variable "MV"
    Nouveau (Linux)::
        0:28(16): error: syntax error, unexpected ')', expecting '('
    ATI / Intel::
        ERROR: 0:131: '{' : syntax error parse error

    Raises ValueError for unrecognised formats.
    """
    import re
    # (pattern, line-number group, description group) — checked in order.
    known_formats = (
        (r'(\d+)\((\d+)\):\s(.*)', 2, 3),        # Nvidia
        (r'ERROR:\s(\d+):(\d+):\s(.*)', 2, 3),   # ATI / Intel
        (r'(\d+):(\d+)\((\d+)\):\s(.*)', 2, 4),  # Nouveau
    )
    for pattern, line_group, desc_group in known_formats:
        match = re.match(pattern, error)
        if match:
            return (
                int(match.group(line_group)),
                match.group(desc_group)
            )
    raise ValueError( 'Unknown GLSL error format' )
|
def mid_point(x1, y1, x2, y2):
    """
    Return the midpoint of the segment between (x1, y1) and (x2, y2).

    :param x1: x-coordinate of the first endpoint
    :param y1: y-coordinate of the first endpoint
    :param x2: x-coordinate of the second endpoint
    :param y2: y-coordinate of the second endpoint
    :return: the midpoint as an (x, y) tuple
    """
    mx = (x1 + x2) / 2
    my = (y1 + y2) / 2
    return (mx, my)
|
def get_by_py_path(py_path):
    """
    Import and return a python callable by its dotted path.

    Keyword arguments:
    py_path -- dotted path of the callable to load (e.g. 'pkg.mod.func')
    """
    components = py_path.split('.')
    # __import__ of 'a.b.c' returns the top-level package 'a';
    # walk the remaining attributes to reach the target.
    target = __import__(".".join(components[:-1]))
    for attr in components[1:]:
        target = getattr(target, attr)
    return target
|
def elfhash(s):
    """
    Compute the classic ELF-format hash of a byte string.

    :param s: bytes (iterating yields ints)
    :return: non-negative 31-bit hash value
    >>> import base64
    >>> s = base64.b64encode(b'hello world')
    >>> elfhash(s)
    224648685
    """
    hash = 0
    x = 0
    for c in s:
        # Shift the running hash and mix in the next byte.
        hash = (hash << 4) + c
        # If the top nibble became non-zero, fold it back in and clear it
        # so the hash stays bounded.
        x = hash & 0xF0000000
        if x:
            hash ^= (x >> 24)
            hash &= ~x
    # Mask to 31 bits so the result is always non-negative.
    return (hash & 0x7FFFFFFF)
|
def normalized2KITTI(box):
    """
    Convert a bounding box from [X, Y, width, height] to KITTI corner form.

    :param box: [X, Y, width, height]
    :return: [xmin, ymin, xmax, ymax] as ints
    """
    x, y, width, height = box
    left, top = int(x), int(y)
    right, bottom = int(x + width), int(y + height)
    return [left, top, right, bottom]
|
def basename(fullyqualname: str) -> str:
    """Strip the namespace from a fully-qualified PHP name for __qualname__.

    It may seem wrong that the fully qualified PHP name is used as __name__
    and the unqualified one as __qualname__, but Python's qualified names
    shouldn't include the module name.
    """
    # Everything after the last backslash; the whole name when there is none.
    _, _, unqualified = fullyqualname.rpartition('\\')
    return unqualified
|
def parse_gav(gav, defaults=(None, None, None)):
    """Parse a "group:artifact:version" string into a 3-tuple.

    gav
        the GAV to parse; must contain exactly two colons.
    defaults
        per-position fallbacks used for empty parts.
    Returns:
        a triple with the parsed GAV
    Raises:
        ValueError when the input does not split into three parts.
    """
    parts = gav.split(':')
    if len(parts) != 3:
        raise ValueError(f"Not a valid GAV pattern: {gav}")
    return tuple(part if part else fallback
                 for part, fallback in zip(parts, defaults))
|
def fib(n: int):
    """Return the n-th Fibonacci number.

    Iterative implementation: the naive double recursion is exponential
    in *n*, this runs in O(n).  Inputs below 2 (including negatives) are
    returned unchanged, matching the original base case.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
|
def is_ascending(numbers):
    """Return whether *numbers* is in non-decreasing order."""
    return all(earlier <= later
               for earlier, later in zip(numbers, numbers[1:]))
|
def list_uniq(seq, key=None):
    """
    Remove duplicate elements from a list, preserving the order of the rest.

    >>> list_uniq([9, 0, 2, 1, 0])
    [9, 0, 2, 1]

    The optional `key` function maps each element to the value used for
    the uniqueness test.

    >>> list_uniq(["Foo", "foo", "bar"], key=lambda s: s.lower())
    ['Foo', 'bar']
    """
    key = key or (lambda x: x)
    seen = set()
    unique = []
    for element in seq:
        marker = key(element)
        if marker not in seen:
            seen.add(marker)
            unique.append(element)
    return unique
|
def get_reader_pairs(reader):
    """
    Map each alphabetically-sorted word pair from `reader` to its score.

    `reader` is a callable yielding (word1, word2, score) triples.
    """
    pairs = {}
    for w1, w2, score in reader():
        pairs[tuple(sorted((w1, w2)))] = score
    return pairs
|
def relu(x):
    """
    Rectified linear unit: :math:`f(x) = x` for positive x, else 0.
    (See https://en.wikipedia.org/wiki/Rectifier_(neural_networks).)
    """
    if x > 0.:
        return x
    return 0.
|
def _deduplicate_names(names):
"""Ensure there are no duplicates in ``names``
This is done by iteratively adding ``_<N>`` to the name for increasing N
until the name is unique.
"""
new_names = []
existing_names = set()
for name in names:
base_name = name + '_'
i = 1
while name in existing_names:
# Iterate until a unique name is found
name = base_name + str(i)
i += 1
new_names.append(name)
existing_names.add(name)
return new_names
|
def values_dict(items):
    """Build a dict from (key, list) pairs, unwrapping single-element lists
    to their sole value.
    """
    unwrapped = {}
    for k, v in items:
        unwrapped[k] = v[0] if len(v) == 1 else v
    return unwrapped
|
def is_xfailed(xfail_args, compatible_version, platform, swift_branch):
    """Return the xfail issue id for platform/swift_branch, or None.

    Lookups are probed from most generic ('*') to most specific
    (branch/platform entry); the first hit wins.
    """
    xfail = xfail_args['compatibility'].get(compatible_version, {})
    branch_xfails = xfail.get('branch', {})
    platform_xfails = xfail.get('platform', {})
    probes = (
        (xfail, '*'),
        (branch_xfails, '*'),
        (platform_xfails, '*'),
        (branch_xfails, swift_branch),
        (platform_xfails, platform),
    )
    for table, lookup in probes:
        if lookup in table:
            # Only the first whitespace-separated token (the issue id).
            return table[lookup].split()[0]
    return None
|
def get_severity(data):
    """Map a level string to a severity, defaulting to "Info"."""
    severity_by_level = {
        "warning": "Medium",
        "error": "Critical",
    }
    return severity_by_level.get(data, "Info")
|
def output_mode_adv(session, Type='Int32', RepCap='', AttrID=1150051, buffsize=0, action=None):
    """[Advance Output Mode <int32>]
    Sets/Gets the output mode, which may be Arbitrary Waveform, Arbitrary Sequence, or Advanced Sequence.
    In Arbitrary Waveform mode, the generator outputs a single waveform.
    In the other two modes, sequences of waveforms are output.
    Attribute value (mode):
    1: AGM933X_VAL_OUTPUT_MODE_ARBITRARY
    2: AGM933X_VAL_OUTPUT_MODE_SEQUENCE
    3: AGM933X_VAL_OUTPUT_MODE_ADVANCED_SEQUENCE
    """
    # Avoid a mutable default argument: the original default ['Get', '']
    # was a single list shared across every call.
    if action is None:
        action = ['Get', '']
    return session, Type, RepCap, AttrID, buffsize, action
|
def sig_stars(p):
    """Return the R-style significance marker for a p value."""
    thresholds = ((0.001, "***"), (0.01, "**"), (0.05, "*"), (0.1, "."))
    for cutoff, marker in thresholds:
        if p < cutoff:
            return marker
    return ""
|
def strippedtxt(what, allowed=None):
    """Strip control characters (ordinals <= 31) from *what*.

    :param what: string to filter.
    :param allowed: optional collection of control characters to keep
        (e.g. ['\\n']).
    :return: the filtered string.
    """
    # Fixes from the original: mutable default argument (allowed=[]),
    # bare `except:` and a dead Python-2 unicode fallback.
    allowed = allowed or ()
    return ''.join(ch for ch in what if ord(ch) > 31 or ch in allowed)
|
def expand_curie_to_uri(curie, context_info):
    """Expand a CURIE to a URI using the given JSON-LD context.

    params
    ======
    curie: curie to be expanded (e.g. bts:BiologicalEntity)
    context_info: jsonld context specifying prefix-uri relation (e.g. {"bts":
    "http://schema.biothings.io/"})

    Returns the input unchanged when it is not a CURIE, its prefix is not
    in the context, or the prefix is one that must not be expanded.
    """
    # as suggested in SchemaOrg standard file, these prefixes don't expand
    PREFIXES_NOT_EXPAND = ["rdf", "rdfs", "xsd"]
    parts = curie.split(":")
    if len(parts) != 2:
        # not a curie: return unmodified
        return curie
    prefix, value = parts
    if prefix in PREFIXES_NOT_EXPAND or prefix not in context_info:
        return curie
    return context_info[prefix] + value
|
def update_object(obj, path, callback=None):
    """
    Traverse a data structure ensuring all nodes exist.
    obj: expected to be a dictionary
    path: string with dot-separated path components
    callback: optional callback function (described below)
    When update_object reaches the parent of the leaf node, it calls the
    optional callback function. The arguments to the callback function are:
    - parent: dictionary containing the leaf node
    - key: string key for the leaf node in parent
    - created: boolean flag indicating whether any part of the path, including
    the leaf node needed to be created.
    If the callback function is None, update_object will still ensure that all
    components along the path exist. If the leaf needs to be created, it will
    be created as an empty dictionary.
    Example:
    update_object({}, 'foo.bar') -> {'foo': {'bar': {}}}
    Return value: Returns either the return value of callback, or if callback
    is None, returns the value of the leaf node.
    """
    parts = path.split(".")
    # `current` walks down the structure; `parent` always trails one level
    # behind so the leaf's container is available for the callback.
    current = obj
    parent = obj
    created = False
    for part in parts:
        if len(part) == 0:
            # e.g. 'a..b' or a leading/trailing dot produces an empty part.
            raise Exception("Path ({}) is invalid".format(path))
        if not isinstance(current, dict):
            # An intermediate node exists but is not a dict; cannot descend.
            raise Exception("Cannot set {}, not a dictionary".format(path))
        # Create dictionaries along the way if path nodes do not exist,
        # but make note of the fact that the previous value did not exist.
        if part not in current:
            current[part] = {}
            created = True
        parent = current
        current = parent[part]
    if callback is not None:
        # parts[-1] is the leaf key inside `parent` after the walk.
        return callback(parent, parts[-1], created)
    else:
        return current
|
def get_ngrams(text, n):
    """Return every whitespace-token n-gram in *text*.

    Inputs:
        text: string
        n: int
    Returns:
        list of strings (each a space-joined n-gram)
    """
    tokens = text.split()
    window_count = len(tokens) - n + 1
    return [" ".join(tokens[start:start + n]) for start in range(window_count)]
|
def social_optimum_cd(alpha, c):
    """
    Social optimum under Cobb-Douglas utility.
    """
    return 1 / (1 - alpha) if alpha > 1 - c else 1 / c
|
def get_char_indexes(arg_string, target):
    """Return every index where *target* occurs in *arg_string* | str --> list(int)"""
    return [position for position, ch in enumerate(arg_string) if ch == target]
|
def trust_compatibility_score(h1_output_labels, h2_output_labels, expected_labels):
    """
    Fraction of instances labeled correctly by BOTH h1 and h2, out of the
    instances labeled correctly by h1.

    Args:
        h1_output_labels: labels produced by model h1.
        h2_output_labels: labels produced by model h2.
        expected_labels: corresponding ground-truth labels.
    Returns:
        The trust compatibility score of h2 with respect to h1, or 0 when
        h1 has no correct labels.
    """
    h1_hits = 0
    joint_hits = 0
    for idx in range(len(expected_labels)):
        truth = expected_labels[idx]
        if h1_output_labels[idx] == truth:
            h1_hits += 1
            if h2_output_labels[idx] == truth:
                joint_hits += 1
    return joint_hits / h1_hits if h1_hits > 0 else 0
|
def is_apriori(Ck_item, Lksub1):
    """
    Judge whether a frequent candidate k-itemset satisfies the Apriori
    property: every (k-1)-subset of it must be in Lk-1.

    Args:
        Ck_item: a frequent candidate k-itemset (frozenset) from Ck.
        Lksub1: set of all frequent candidate (k-1)-itemsets.
    Returns:
        True if the property holds, else False.
    """
    return all(Ck_item - frozenset([member]) in Lksub1
               for member in Ck_item)
|
def canAddExtension(somefile, extension):
    """Return True only when both the file and the extension are provided."""
    return somefile is not None and extension is not None
|
def check_string_is_empty(string):
    """Return True when *string* is exactly the empty string, else False.
    """
    return string == ''
|
def datasets_to_str(datasets):
    """
    Render all existing datasets as pretty-printed, key-sorted JSON.
    The output can then be copied & pasted to set the value of 'DATASETS'
    in crab.datasets.
    """
    import json
    return json.dumps(datasets, indent=4, sort_keys=True)
|
def canvas2px(coord, dmn, dpi):
    """
    Convert a matplotlib canvas coordinate to pixels.
    """
    pixels = coord * dmn * dpi
    return int(round(pixels))
|
def unique_everseen(items):
    """Return each item of *items* once, keeping first-seen order.
    Equivalent to unique_everseen from the package more-itertools."""
    # Plain dicts preserve insertion order (guaranteed since Python 3.7).
    return list(dict.fromkeys(items))
|
def _get_opcode_length(op):
    """
    Get length of operand bytes.

    Trailing falsy (zero) entries are stripped, but at least one entry is
    always kept so an all-zero operand still reports length 1.

    NOTE: mutates ``op`` in place (pops its trailing elements).
    """
    while not op[-1]:
        # encountered bug for the following
        # 00 04 31 add byte ptr [rcx + rsi], al
        if len(op) == 1:
            break
        op.pop()
    return len(op)
|
def get_param_path(cache_dir, method, data_fn, prop_missing, max_num_feature,
                   feature_selection):
    """Build the path of the pickled-parameters file for a run."""
    return (f'{cache_dir}/{method}_datafn={data_fn}_propmiss={prop_missing}'
            f'_maxfeat={max_num_feature}_featselect={feature_selection}_param.pkl')
|
def sort_uniq(seq):
    """
    Deduplicate an already-sorted sequence while merging comment payloads.

    Args:
        seq: the sequence of the data; items are either plain values or
             lists whose last element is the value and the preceding
             elements are comment lines to preserve.
    Returns:
        sequence with sorted order and no duplicates; comments attached to
        duplicate values are merged onto the surviving entry.
    """
    def _check_list(x):
        # Helper: list items carry comments, plain items do not.
        if isinstance(x, list):
            return True
        else:
            return False
    seen = {}
    result = []
    for item in seq:
        marker = item
        # We can have list there with comment
        # So if list found just grab latest in the sublist
        if _check_list(marker):
            marker = marker[-1]
        if marker in seen:
            # Not a list, no comment to preserve
            if not _check_list(item):
                continue
            # Here we need to preserve comment content
            # As the list is already sorted we can count on it to be
            # seen in previous run.
            # Match the current and then, based on whether the previous
            # value is a list, we append or convert to list entirely.
            prev = result[-1]
            if _check_list(prev):
                # Remove last line of the appending
                # list which is the actual dupe value
                item.pop()
                # Remove it from original
                prev.pop()
                # join together
                prev += item
                # append the value back
                prev.append(marker)
                result[-1] = prev
            else:
                # Easy as there was no list
                # just replace it with our value
                result[-1] = item
            continue
        seen[marker] = 1
        result.append(item)
    return result
|
def get_machines(request):
    """Returns the available machines that jobs can run on
    Keyword arguments:
    request - Django HttpRequest
    Returns:
    dict mapping machine hostname -> machine properties.  Currently
    hard-coded to only the local host with no extra properties.
    """
    return {"localhost": {}}
|
def interpolation(list, idx1, idx2, ratio):
    """Linearly interpolate between list[idx1] and list[idx2].

    Args:
        list (vector): list of values
        idx1: index of the first value in the list
        idx2: index of the second value in the list
        ratio: fractional distance of the interpolation point between the
            two values, i.e. y = y1 + ratio*(y2-y1)
    Returns:
        the interpolated value
    """
    y1 = list[idx1]
    y2 = list[idx2]
    return y1 + ratio * (y2 - y1)
|
def string(integer, len_str_out):
    """
    Return ``integer`` as a string of length ``len_str_out``, left-padded
    with zeros.  Prints an error and returns '' when the number does not
    fit.

    Example:
        ut.string(1,3)-->'001'
    """
    str_int = str(integer)
    if len(str_int) > len_str_out:
        print('****ERROR: length of string too short')
        return ''
    # rjust (not zfill) pads *before* a minus sign, matching the
    # historical behaviour for negative inputs (e.g. -1, 3 -> '0-1').
    return str_int.rjust(len_str_out, '0')
|
def get_images_helper(request, images):
    """
    Gather an object's images and format them with their corresponding
    types.

    Parameters:
        request : Request object from the serializer instance.
        images : Queryset of image objects connected to the Object
    Returns:
        List of image dicts (``image_url`` and ``image_type`` keys).
    """
    # The scheme/host prefix is identical for every image, so build it once.
    base_url = f"{request.scheme}://{request.get_host()}"
    return [
        {
            "image_url": f"{base_url}{image.image.url}",
            "image_type": image.type.type,
        }
        for image in images
    ]
|
def get_bytes_from_gb(size_in_gb):
    """Convert size from GB into bytes."""
    # 1 GB = 1024**3 bytes (binary gigabyte / GiB).
    return size_in_gb * 1024 ** 3
|
def _normalise_and_validate_contact_urn(contact_urn):
"""
Normalises and validates the given URN.
Fails with an AssertionError if the given URN is invalid.
:param contact_urn: URN to de-identify.
:type contact_urn: str
:return: Normalised contact urn.
:rtype: str
"""
if contact_urn.startswith("tel:"):
# Some historical Golis numbers had missing `+` sign so the need to be reformatted here.
if contact_urn.startswith("tel:25290"):
contact_urn = f"{contact_urn[:4]}+{contact_urn[4:]}"
assert contact_urn.startswith("tel:+")
if contact_urn.startswith("telegram:"):
# Sometimes a telegram urn ends with an optional #<username> e.g. telegram:123456#testuser
# To ensure we always get the same urn for the same telegram user, normalise telegram urns to exclude
# this #<username>
contact_urn = contact_urn.split("#")[0]
return contact_urn
|
def convert_resnet_state_dict(src_dict):
    """Return the correct mapping of tensor name and value.

    Maps names from the torchvision ResNet layout to our resnet
    conv_body and box_head layout (layerN -> res{N+1}, stem -> res1,
    classifier `fc` weights dropped).
    """
    dst_dict = {}
    for key, tensor in src_dict.items():
        if key.startswith('fc'):
            # The classification head is not used; skip it.
            continue
        parts = key.split('.')
        if key.startswith('layer'):
            assert len(parts[0]) == 6
            stage = int(parts[0][5]) + 1
            new_key = '.'.join(['res%d' % stage] + parts[1:])
        else:
            # Stem weights (conv1, bn1, ...) belong to res1.
            new_key = '.'.join(['res1'] + parts)
        dst_dict[new_key] = tensor
    return dst_dict
|
def gp_tuple_to_dict(gp_tuple):
    """Convert a groupings parameters (gp) tuple into a dict suitable
    to pass to the ``grouping_parameters`` CompoundTemplate.__init__
    kwarg.

    Positions missing from the tuple fall back to their defaults.
    """
    defaults = (('min', 1), ('max', 1), ('name', None),
                ('possible_types', None), ('is_separator', False),
                ('inner_sep_type', None))
    return {
        key: gp_tuple[i] if i < len(gp_tuple) else default
        for i, (key, default) in enumerate(defaults)
    }
|
def get_constr(module_str, start, end):
    """Extract and return the constructor string from module_str[start:end];
    also return the first position past the constructor string."""
    # Skip leading spaces to the start of the constructor name.
    pos = start
    while module_str[pos] == ' ':
        pos += 1
    if module_str.find('(', start, end) != -1:
        # Parameterised constructor: extends up to and including ')'.
        stop = module_str.find(')', start, end) + 1
    else:
        # Bare constructor: extends to the next space or the range end.
        stop = pos
        while stop < end and module_str[stop] != ' ':
            stop += 1
    return module_str[pos:stop], stop
|
def lays_in(a, b):
    """ Does cube a completely lay in cube b? """
    a_from, a_to = a
    b_from, b_to = b
    # a lies in b iff every lower bound of b is <= the matching lower
    # bound of a, and every upper bound of a is <= the matching upper
    # bound of b.
    lower_ok = all(lo_b <= lo_a for lo_a, lo_b in zip(a_from, b_from))
    upper_ok = all(hi_a <= hi_b for hi_a, hi_b in zip(a_to, b_to))
    return lower_ok and upper_ok
|
def _contains_acquire_instruction(experiments):
""" Return True if the list of experiments contains an Acquire instruction
Parameters:
experiments (list): list of schedules
Returns:
True or False: whether or not the schedules contain an Acquire command
Raises:
"""
for exp in experiments:
for inst in exp['instructions']:
if inst['name'] == 'acquire':
return True
return False
|
def b2s(a):
    """
    Converts bytes (or any iterable of code points) to str
    """
    return "".join(chr(code) for code in a)
|
def check_description(description, check_term=' and '):
    """
    Check if the first argument contains the 2nd argument
    :param description: str
    :param check_term: str
    :return: bool
    """
    return check_term in description
|
def save_file(filename, content, header="", writemode="w"):
    """Write ``header`` followed by ``content`` to ``filename``.

    Arguments:
        filename {str} -- Path of the file to write.
        content {str} -- Main body to write after the header.
    Keyword Arguments:
        header {str} -- Optional text written before the content (default: {""})
        writemode {str} -- Changes the write mode on output (default: {"w"})
    Returns:
        bool -- Always True once the file has been written.
    """
    with open(filename, writemode) as out:
        out.writelines((header, content))
    return True
|
def fontName2FamilyName(name):
    """For now take the chunk up till "-" in the filename and ignore the family
    name as set in the font.info Also make sure that the extension is removed,
    if the font has no "-" it isn't name. Relay-Medium_Italic.ufo becomes
    Relay. ThisFont.ufo becomes ThisFont."""
    stem = name.partition('.')[0]  # drop the extension
    return stem.partition('-')[0]  # drop the style suffix
|
def windows_only_packages(pkgs):
    """Find out which packages are windows only"""
    # Each element of pkgs maps package name -> info dict; collect names
    # whose OS_type is "windows" (case-insensitive).
    return {
        name
        for pkg in pkgs
        for name, info in pkg.items()
        if info.get("OS_type", "").lower() == "windows"
    }
|
def dict_splicer(plot_dict,Ld,Lx):
    """Dictionary constructor for plotting
    Base-level function used by most other plotting functions to construct a list of dictionaries,
    each containing the passed arguments to the underlying plotting calls for each different dataset.
    Parameters
    ----------
    plot_dict : dict
        Contains the parameters to be passed to the underlying plotting function.
    Ld : int
        Number of plots to be made.
    Lx : list
        Contains the length of the data array of each plot to be made.
    Returns
    -------
    dict_list : list
        List of dictionaries, one for each plot to be made.
    """
    from numbers import Number
    dict_list=[]
    dict_keys=plot_dict.keys()
    for i in range(Ld):
        temp_dict={}
        for k in dict_keys:
            try:
                # Creating the generator expression calls iter() on the
                # value immediately, so this raises TypeError for
                # non-iterable values -- an iterability probe.
                temp=(i for i in plot_dict[k])
            except TypeError:
                # Scalar value: shared verbatim by every plot.
                temp_dict[k]=plot_dict[k]
            else:
                # Strings and per-point sequences (length matching this
                # plot's data length) are passed through whole.
                if type(plot_dict[k]) is str or len(plot_dict[k])==Lx[i]:
                    temp_dict[k]=plot_dict[k]
                # Colour-like keys may hold a single colour spec (string
                # or numeric RGB(A) sequence) shared by all plots.
                elif (k in 'color' or 'color' in k) and (type(plot_dict[k]) is str or isinstance(plot_dict[k][0],Number)):
                    temp_dict[k]=plot_dict[k]
                else:
                    # Otherwise assume one entry per plot and take the i-th.
                    temp_dict[k]=plot_dict[k][i]
        dict_list.append(temp_dict)
    return(dict_list)
|
def filter_dict(pred, d):
    """Return a subset of the dictionary d, consisting only of the keys that satisfy pred(key)."""
    return {key: value for key, value in d.items() if pred(key)}
|
def _get_a0(nshell, ncomponent):
"""Return the first mass number in the given shell
:param nshell: shell (0=s, 1=p, 2=sd, ...)
:param ncomponent: 1 -> neutrons, 2 -> protons & neutrons
"""
return int((nshell+2) * (nshell+1) * nshell/3 * ncomponent)
|
def get_index_of_most_important_sentences(weighted_sums, num_vectors):
    """Find the indices of the two sentences with the highest weighted sums.

    Args:
        weighted_sums: sequence of (non-negative) per-sentence scores.
        num_vectors: number of entries of ``weighted_sums`` to consider.
    Returns:
        (largest_index, second_largest_index) tuple; both default to 0
        when fewer than one/two positive scores exist.
    """
    largest_sum_index, second_largest_sum_index = 0, 0
    largest_sum, second_largest_sum = 0, 0
    for i in range(num_vectors):
        current_sum = weighted_sums[i]
        if current_sum > largest_sum:
            # Bug fix: demote the previous maximum to second place instead
            # of discarding it, otherwise e.g. [3, 5, 1] reported index 2
            # as the runner-up rather than index 0.
            second_largest_sum = largest_sum
            second_largest_sum_index = largest_sum_index
            largest_sum = current_sum
            largest_sum_index = i
        elif current_sum > second_largest_sum:
            second_largest_sum = current_sum
            second_largest_sum_index = i
    return largest_sum_index, second_largest_sum_index
|
def transform_part_info(data):
    """Transform the participants annotation JSON, into one that has names as keys."""
    by_name = {}
    for part_id, part in data.items():
        # Skip entries with no usable name.
        if 'Name' not in part or not part['Name'].strip():
            continue
        name = part['Name'].strip()
        # NOTE: mutates the input dict, as the original implementation did.
        del part['Name']
        cleaned = {}
        for field, value in part.items():
            field = field.strip().lower()
            cleaned[field] = value.strip().lower()
            if field == 'ethnicity':
                # Debug trace of ethnicity values, kept from the original.
                print(cleaned[field], part_id)
        by_name[name] = cleaned
    return by_name
|
def buildRowBorder(start, middle, end, spacer, dateWidth, editorWidths, editorIds):
    """
    Create a row border line.
    :param start: The character to use at the start
    :param middle: The character to use for each middle column
    :param end: The character to use at the end
    :param spacer: The character to use for each cell ceiling/floor
    :param dateWidth: The width of the date column
    :param editorWidths: The width of each editors column
    :param editorIds: The ids of each editor
    :return:
    """
    # One cell per column (date first, then one per editor), each padded
    # by 2 for the surrounding spaces; join with the column separator.
    cells = [spacer * (dateWidth + 2)]
    cells.extend(spacer * (editorWidths[editor] + 2) for editor in editorIds)
    return start + middle.join(cells) + end
|
def XOR(x, y, z):
    """ Bitwise XOR of three binary strings, zero-padded to len(x). """
    value = int(x, 2) ^ int(y, 2) ^ int(z, 2)
    return bin(value)[2:].zfill(len(x))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.