content stringlengths 42 6.51k |
|---|
def plist_set_bundle_id(plist_data, bundle_id):
    """Replace the CFBundleIdentifier value in raw plist XML text.

    The line immediately following a ``<key>CFBundleIdentifier</key>`` line
    is replaced with a ``<string>`` element containing *bundle_id*.
    """
    result = []
    skip_next = False
    for line in plist_data.split("\n"):
        if skip_next:
            # drop the original <string> value line
            skip_next = False
            continue
        result.append(line)
        if "CFBundleIdentifier" in line:
            result.append(" <string>%s</string>" % bundle_id)
            skip_next = True
    return "\n".join(result)
def argmax(ls):
    """Return the index of the maximum value in *ls*.

    :parameter ls: iterable of comparable values
    :returns: index of the maximum value, or -1 for an empty input

    BUG FIX: the original initialized the running maximum to -1.0, so any
    input whose values were all <= -1.0 incorrectly returned -1. Tracking
    "no value seen yet" explicitly fixes that while keeping -1 for empty.
    """
    result = -1
    best = None
    for n, value in enumerate(ls):
        if best is None or value > best:
            best = value
            result = n
    return result
def colored(name: str, color: str = "", style: str = "") -> str:
    """Wrap *name* in ANSI escape codes for the given color/style names.

    Falls back to the plain string when colorama is unavailable; unknown
    color or style names are silently ignored.
    """
    color_map = {}
    style_map = {}
    reset_code = ""
    try:
        import colorama  # pylint: disable=import-outside-toplevel
    except ModuleNotFoundError:  # pragma: no cover
        pass
    else:
        color_map = {
            "green": colorama.Fore.GREEN,
            "red": colorama.Fore.RED,
            "blue": colorama.Fore.BLUE,
            "gray": colorama.Fore.LIGHTWHITE_EX,
        }
        style_map = {"bright": colorama.Style.BRIGHT, "dim": colorama.Style.DIM}
        reset_code = colorama.Style.RESET_ALL
    return f"{style_map.get(style, '')}{color_map.get(color, '')}{name}{reset_code}"
def _eval_at(poly, x, prime):
    """Evaluate a polynomial (coefficient tuple) at x via Horner's rule.

    Used to generate a Shamir pool in make_random_shares below.

    NOTE(review): the modular reduction by *prime* is commented out, so this
    evaluates over the plain integers rather than GF(prime) -- confirm that
    callers perform the reduction elsewhere.
    """
    accum = 0
    # Horner's method: fold coefficients from highest degree down.
    for coeff in reversed(poly):
        accum *= x
        accum += coeff
        # accum %= prime
    return accum
def integerize(num, count):
    """Multiply *num* by *count*, returning an int when the product is integral.

    The product must support ``.is_integer()`` (i.e. be a float).
    """
    product = num * count
    if product.is_integer():
        return int(product)
    return product
def filter_networks(networks, wanted):
    """
    Return a list of desired Network objects.

    :param networks:
        list of Network dicts (with 'network_address' and 'prefix_length')
    :param wanted:
        list of CIDR strings you want
    """
    selected = []
    for net in networks:
        cidr = '%s/%s' % (net['network_address'], net['prefix_length'])
        if cidr in wanted:
            selected.append(net)
    return selected
def matrixAsList(matrix, value=True):
    """
    Turns a matrix into a list of (row, col) coordinates matching *value*.

    Column count is taken from the first row, matching the original.
    """
    num_rows, num_cols = len(matrix), len(matrix[0])
    return [
        (row, col)
        for row in range(num_rows)
        for col in range(num_cols)
        if matrix[row][col] == value
    ]
def cmd_link(lang, shared):
    """Return linker command.

    Parameters
    ----------
    lang : {'icc', 'c', 'cuda'}
        Programming language
    shared : bool
        ``True`` if shared

    Returns
    -------
    cmd : list of `str`
        List with linker command

    Raises
    ------
    ValueError
        If *lang* is not one of the supported languages.

    BUG FIX: the original used a bare ``raise`` with no active exception,
    which raises ``RuntimeError: No active exception to re-raise`` instead
    of a meaningful error.
    """
    if lang == 'icc':
        return ['icc']
    if lang == 'c':
        return ['gcc']
    if lang == 'cuda':
        # Shared CUDA objects are linked with the host compiler.
        return ['g++'] if shared else ['nvcc']
    raise ValueError('Lang must be one of {icc, c, cuda}')
def add_extra_ybd_args(ybd_args, extra_args):
    """
    Inserts extra arguments into a list of yb_build.sh arguments. If a "--"
    argument is present, new arguments are inserted before it, because the
    rest of yb_build.sh's arguments may be passed along to yet another
    command.

    :param ybd_args: existing yb_build.sh arguments
    :param extra_args: extra arguments to insert
    :return: new list of yb_build.sh arguments
    """
    try:
        sep = ybd_args.index('--')
    except ValueError:
        # no separator: append at the end
        return ybd_args + extra_args
    return ybd_args[:sep] + extra_args + ybd_args[sep:]
def uniform(record_list):
    """Generate uniform abundance distribution from a number of records.

    Args:
        record_list (list): a list of record.id
    Returns:
        dict: a dictionary with records as keys, abundance as values
    """
    n_records = len(record_list)
    # division is evaluated lazily per item, so an empty list yields {}
    return {record: 1 / n_records for record in record_list}
def derivative(x_axis, y_axis, dx):
    """
    Calculate the derivative of f(x).

    Uses a forward difference over a window of *dx* samples, then pads the
    front and back so the result has the same length as the input.

    :param x_axis: list of the x axis data
    :param y_axis: list of the y axis data
    :param dx: difference of x axis (window width, in samples)
    :return: f'(x) as a list the same length as the input
    """
    # front padding: ceil(dx/2) placeholder zeros, overwritten below
    der = (dx//2+dx%2)*[0]
    for i in range(len(x_axis) - dx):
        der.append((y_axis[i + dx] - y_axis[i]) / (x_axis[i + dx] - x_axis[i]))
    # back padding: repeat the last computed value floor(dx/2) times
    der += dx // 2 * [der[-1]]
    # overwrite the front padding with an early computed value
    # NOTE(review): index dx//2+dx%2+1 is one past the first computed slot
    # (dx//2+dx%2) -- confirm whether the +1 is intentional.
    for i in range(dx//2+dx%2):
        der[i] = der[dx//2+dx%2+1]
    return der
def hey(phrase):
    """Return Bob's response to *phrase* (exercism 'Bob' kata)."""
    phrase = phrase.strip()
    if not phrase:
        return "Fine. Be that way!"
    question = phrase.endswith("?")
    shouting = phrase.isupper()
    if question and shouting:
        return "Calm down, I know what I'm doing!"
    if question:
        return "Sure."
    if shouting:
        return "Whoa, chill out!"
    return "Whatever."
def jump_if_true(params, modes, values, target_loc, current, puzzle_input):
    """If the first parameter is non-zero, it sets the instruction pointer to
    the second value. Otherwise, it does nothing.

    Returns the (possibly updated) instruction pointer *current*.

    NOTE(review): ``puzzle_input[0, target_loc]`` uses tuple indexing, which
    only works on 2-D containers such as numpy arrays -- confirm
    puzzle_input's type. ``params`` and ``modes`` are currently unused; the
    mode handling below is commented out.
    """
    if values[0] != 0:
        # if modes[1] == 0:
        #     current = puzzle_input[params[1]]
        # elif modes[1] == 1:
        #     current = params[1]
        # elif modes[1] == 2:
        #     current = puzzle_input[params[1] + relative base]
        current = puzzle_input[0, target_loc]
    return current
def fparse(fname):
    """
    Filename parser for NIFTI and AFNI files.

    Returns prefix from input datafiles (e.g., sub_001) and filetype (e.g.,
    .nii). If AFNI format is supplied, extracts space (+tlrc, +orig, or +mni)
    as filetype.

    Parameters
    ----------
    fname : str

    Returns
    -------
    str, str: prefix of filename, type of filename
    """
    if '.' not in fname:
        return fname, ''
    if '+' in fname:  # AFNI format: space marker follows the '+'
        prefix = fname.split('+', 1)[0]
        space = fname.rsplit('+', 1)[-1]
        return prefix, '+' + space.split('.', 1)[0]
    if fname.endswith('.gz'):  # gzipped: extension spans the last two parts
        parts = fname.split('.')
        return '.'.join(parts[:-2]), '.' + '.'.join(parts[-2:])
    prefix, _, ext = fname.rpartition('.')
    return prefix, '.' + ext
def normalize_route(route: str) -> str:
    """Strip some of the ugly regexp characters from the given pattern.

    >>> normalize_route('^/user/<user_id:int>/?$')
    u'/user/(user_id:int)/'
    """
    trans = str.maketrans('<>', '()')
    return str(route).lstrip('^').rstrip('$').rstrip('?').translate(trans)
def leap_is_one(year):
    """
    Return True when *year* is a leap year under the Gregorian rules.
    """
    # TomareUtsuZo
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
def get_current_state(slack_map):
    """
    Get the current state of Slack usergroups.

    :param slack_map: Slack data from app-interface
    :type slack_map: dict
    :return: current state data, keys are workspace -> usergroup
             (ex. state['coreos']['app-sre-ic'])
    :rtype: dict
    """
    current_state = {}
    for workspace, spec in slack_map.items():
        client = spec['slack']
        for usergroup in spec['managed_usergroups']:
            users, channels, description = client.describe_usergroup(usergroup)
            workspace_state = current_state.setdefault(workspace, {})
            workspace_state[usergroup] = {
                "workspace": workspace,
                "usergroup": usergroup,
                "users": users,
                "channels": channels,
                "description": description,
            }
    return current_state
def inc_column(idx, column, by=1):
    """Increment the index and column by the given amount.

    :param idx: The current LED matrix index
    :param column: The current column
    :param by: How much to move
    :return: Tuple of (idx, column), with column wrapped into 0..7
    """
    column += by
    if column > 7:
        # each full row of 8 columns advances idx by 8
        rows, column = divmod(column, 8)
        idx += rows * 8
    return (idx, column)
def _enable_atomic_add(kernel_info):
"""Judge whether to enable atomic add."""
for op in kernel_info["op_desc"]:
if not op["attr"]:
continue
for attr in op["attr"]:
if attr["name"] == "enable_atomic_add" and attr["value"]:
return True
return False |
def is_triplet(triplet: list) -> bool:
    """
    Return True when the first, second and last elements satisfy
    a**2 + b**2 == c**2 (a Pythagorean triplet when a < b < c).

    :param triplet: sequence of at least two numbers
    :return: bool
    """
    a, b, c = triplet[0], triplet[1], triplet[-1]
    return a * a + b * b == c * c
def is_binarystring(s):
    """Return True if *s* is a bytes object (binary string, not unicode text)."""
    return isinstance(s, bytes)
def normalize_string(names):
    """Format strings to be lowercase and capitalized, per word in string.

    E.g.: 'FOO BAR' becomes 'Foo Bar'

    Args:
        names: List of strings.
    Returns:
        List of normalized strings.
    """
    return [
        ' '.join(word.lower().capitalize() for word in name.split(' '))
        for name in names
    ]
def get_average(list_of_numbers: list) -> float:
    """Get an average of the numbers in a list, rounded to 2 decimals.

    Args:
        list_of_numbers: A list of numbers.
    Returns:
        The list average as a float (raises ZeroDivisionError for []).
    """
    total = 0
    for value in list_of_numbers:
        total += value
    return round(total / len(list_of_numbers), 2)
def base_dp_hp_kernel_config(defn, hp1=5, hp2=.1):
    """Sample the base level Dirichlet process parameter (gamma)
    using the method of Escobar and West (1995) with n = T.

    Parameters
    ----------
    defn : LDA model definition (unused by this kernel config)
    """
    hyperparams = {'hp1': hp1, 'hp2': hp2}
    return [('direct_base_dp_hp', hyperparams)]
def _get_id(obj):
"""Return object id.
Allows usage of both an object or an object's ID as a parameter when
dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj |
def ToOrdinal(value):
    """
    Convert a numerical value into an ordinal number string.

    @param value: the number to be converted
    """
    if value % 100 // 10 == 1:
        # 11th, 12th, 13th (and 111th, ...) always take 'th'
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(value % 10, 'th')
    return '{}{}'.format(value, suffix)
def semitones_to_pitch_bend(semitones, semitone_range=2.):
    """Convert a semitone value to the corresponding MIDI pitch bend integer.

    Parameters
    ----------
    semitones : float
        Number of semitones for the pitch bend.
    semitone_range : float
        Convert to +/- this semitone range. Default is 2., which is the
        General MIDI standard +/-2 semitone range.

    Returns
    -------
    pitch_bend : int
        MIDI pitch bend amount, in ``[-8192, 8191]``.

    NOTE(review): semitones == +semitone_range yields 8192, one past the
    documented maximum of 8191 -- confirm whether callers clamp.
    """
    fraction = semitones / semitone_range
    return int(fraction * 8192)
def variable_dict(id_name, polscore_name, x_ord_name, x_unord_name,
                  effect_vs_0, effect_vs_0_se):
    """Pack variable names into a dictionary.

    Parameters
    ----------
    id_name : Tuple of strings (or None).
    polscore_name : Tuple of strings. Required.
    x_ord_name : Tuple of strings (or None).
    x_unord_name : Tuple of strings (or None).
    effect_vs_0 : Tuple of strings (or None).
    effect_vs_0_se : Tuple of strings (or None).

    Returns
    -------
    var : Dictionary. Variable names, upper-cased lists.

    BUG FIX: the original check raised whenever x_ord_name was empty and
    x_unord_name was non-empty. Per the error message, the intent is to
    require at least one of the two to be non-empty.
    """
    def upper_list(string_list):
        # Normalize None/any iterable of names to an upper-cased list.
        if string_list is None:
            return []
        if not isinstance(string_list, list):
            string_list = list(string_list)
        return [s.upper() for s in string_list]

    if not polscore_name:
        raise Exception('Policy Score must be specified.')
    id_name = upper_list(id_name)
    polscore_name = upper_list(polscore_name)
    x_ord_name = upper_list(x_ord_name)
    x_unord_name = upper_list(x_unord_name)
    effect_vs_0 = upper_list(effect_vs_0)
    effect_vs_0_se = upper_list(effect_vs_0_se)
    if not x_ord_name and not x_unord_name:
        raise Exception('x_ord_name or x_unord_name must contain names.')
    return {'id_name': id_name,
            'polscore_name': polscore_name,
            'x_ord_name': x_ord_name,
            'x_unord_name': x_unord_name,
            'effect_vs_0': effect_vs_0,
            'effect_vs_0_se': effect_vs_0_se}
def infer_status(statuses):
    """Infers an object status from the statuses passed in.

    Parameters
    ----------
    statuses : list of lists of strings or empty list
        The list of statuses used to infer the resulting status (the result
        of execute_fetchall)

    Returns
    -------
    str
        The inferred status

    Notes
    -----
    The inference is done in the following priority (high to low):
    (1) public, (2) private, (3) awaiting_approval, (4) sandbox.
    """
    found = {row[0] for row in statuses} if statuses else set()
    for candidate in ('public', 'private', 'awaiting_approval'):
        if candidate in found:
            return candidate
    # no statuses, or none of the above -> sandbox
    return 'sandbox'
def pad_ij_to_pad_n(i, j):
    """Transform (i, j) coordinates to the corresponding pad number
    according to the specification. (0, 0) corresponds to the top-left pad
    while (7, 7) corresponds to the bottom right pad.

    See https://github.com/Ableton/push-interface/blob/master/doc/AbletonPush2MIDIDisplayInterface.asc#23-midi-mapping
    """
    row = min(max(i, 0), 7)
    col = min(max(j, 0), 7)
    return 92 - 8 * row + col
def to_flatten_dict(d, parent_key="", sep="."):
    """
    Parse properties dict to dot notation (flatten nested dicts).

    Empty dicts are kept as leaf values.
    """
    flat = {}
    for key, value in d.items():
        full_key = parent_key + sep + key if parent_key else key
        if value and isinstance(value, dict):
            flat.update(to_flatten_dict(value, full_key, sep=sep))
        else:
            flat[full_key] = value
    return flat
def to_frame_id(track_frame_id):
    """
    Takes a bb_tracker track id and returns the frame id within.

    E.g. to_frame_id('f14083813666064354331d24c1') = 14083813666064354331

    Args:
        track_frame_id (str): The frame identifier for the track.
    Returns:
        int: The frame id (digits between the leading 'f' and the first 'd').
    """
    return int(track_frame_id.split('d')[0][1:])
def getDivisors(n):
    """
    input: positive integer 'n' >= 1
    returns all divisors of n (inclusive 1 and 'n')
    """
    # precondition
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    divisors = [d for d in range(1, n + 1) if n % d == 0]
    # postcondition: divisors always include 1 and n themselves
    assert divisors[0] == 1 and divisors[-1] == n, \
        "Error in function getDivisiors(...)"
    return divisors
def index_keyed_tuple_data(keyed_tuple):
    """Return [(index, item), ...] for the data part of a keyed tuple.

    keyed_tuple:
        (<key>, <data to be indexed (must be convertible to a list)>)
    The key element is ignored.
    """
    data_items = list(keyed_tuple[1])
    return list(enumerate(data_items))
def compare_partial_dicts(result, expected):
    """
    Make sure all the keys in expected are matched by keys in result, and
    that the values stored in those keys match. Result can contain more
    items than expected - those are ignored.

    Used in the test_lvs, test_pvs and test_vgs tests.

    BUG FIX: the original raised KeyError when a key from *expected* was
    missing from *result*; a missing key is now reported as a mismatch.
    """
    mismatches = 0
    for k in expected:
        if k not in result:
            print("Failed for key {k}, missing from result".format(k=k))
            mismatches += 1
        elif result[k] != expected[k]:
            print("Failed for key {k}, {r} != {e}".format(k=k, r=result[k], e=expected[k]))
            mismatches += 1
    return mismatches == 0
def get_rounds(number):
    """
    Get list of current and next rounds.

    :param number: int - current round number.
    :return: list - current round and the two that follow.
    """
    return [number + offset for offset in (0, 1, 2)]
def first_digit(number: int) -> int:
    """Return the most significant decimal digit of *number*.

    >>> first_digit(-123)
    1
    >>> first_digit(0)
    0
    >>> first_digit(123)
    1
    >>> first_digit(123456789)
    1
    """
    return int(str(abs(number))[0])
def specify_rule(rule, num):
    """Specify the format of the rule.

    num == 1 returns [rule] (single), num > 1 returns [rule]{num} (with
    number), num < 0 returns [rule]+ (wildcard), num == 0 raises ValueError.

    Args:
        rule (str): The raw rule string to be specified.
        num (int): The num of the rule. Can't be 0.
    Raises:
        ValueError: If num == 0.
    Returns:
        str: The specified format of the rule.

    Examples:
        >>> from os_urlpattern.parse_utils import specify_rule
        >>> specify_rule('a-z', 1)
        [a-z]
        >>> specify_rule('a-z', 2)
        [a-z]{2}
        >>> specify_rule('a-z', -1)
        [a-z]+
    """
    if num == 0:
        raise ValueError('Invalid num %s' % str(num))
    if num < 0:
        return '[%s]+' % rule
    if num == 1:
        return '[%s]' % rule
    return '[%s]{%d}' % (rule, num)
def call_method(obj, func, *args, **kwargs):
    """Look up attribute *func* on *obj* and invoke it with the given arguments."""
    method = getattr(obj, func)
    return method(*args, **kwargs)
def consolidate(arr):
    """Merges intersecting sets in a list of sets.

    Taken from: http://rosettacode.org/wiki/Set_consolidation#Python:_Iterative
    Recursive version will hit max recursion depth.

    NOTE: mutates the input sets in place (merged sets are emptied).
    """
    # drop empty sets up front
    sets = [s for s in arr if s]
    for i, s1 in enumerate(sets):
        if s1:
            for s2 in sets[i+1:]:
                if s1.intersection(s2):
                    # merge s1 into s2, empty s1, and continue scanning
                    # with the merged set as the new accumulator
                    s2.update(s1)
                    s1.clear()
                    s1 = s2
    # emptied (already-merged) sets are filtered out of the result
    return [s for s in sets if s]
def count_substring(string, sub_string):
    """Count (possibly overlapping) occurrences of sub_string in string.

    Unlike str.count, overlapping matches are counted (e.g. 'aa' occurs
    twice in 'aaa').

    FIX: the original "docstring" was commented-out dead code; replaced
    with real documentation.
    """
    window = len(sub_string)
    return sum(
        1
        for i in range(len(string))
        if string[i:i + window] == sub_string
    )
def is_prime(number):
    """Returns True if the specified number is prime."""
    if number <= 1:
        return False
    divisor = 2
    # trial division up to sqrt(number)
    while divisor * divisor <= number:
        if number % divisor == 0:
            return False
        divisor += 1
    return True
def is_valid_state_change_response(response):
    """
    Returns true if a key event response is valid (i.e. non-empty).

    str -> bool
    """
    return len(response) > 0
def _html_attrs_to_str(attrs):
"""Converts a dictionary of HTML attributes to its HTML representation.
Args:
attrs (dict): Attributes with values
Returns:
str: HTML attributes string ready to be used inside a HTML tag.
"""
response = ''
for name, value in attrs.items():
response += name
if value is not None:
response += f'="{value}"'
response += ' '
return response.rstrip(' ') |
def reverse_remap_to_dict(data: list):
    """
    From a list of dictionaries of values, remap the items to create a
    dict whose values are lists of the collected values.

    Parameters
    ----------
    data (list): e.g. [{'a': 1}, {'a': 2}]

    Returns
    -------
    dict: e.g. {'a': [1, 2]}

    BUG FIXES: the original used ``data[-0]`` (same as data[0], but
    misleading), crashed with IndexError on an empty list, raised KeyError
    for keys not present in the first record, and documented the return
    type as list.
    """
    if not data:
        return {}
    items = {key: [] for key in data[0]}
    for record in data:
        for key, value in record.items():
            items.setdefault(key, []).append(value)
    return items
def _road_section_from_nodes(origin: str, destination: str) -> str:
"""Create a road section 'A->B' from two nodes 'A' and 'B'."""
return f"{origin}->{destination}" |
def build_type_flag(compiler, build_type):
    """
    Returns flags specific to the build type (Debug, Release, etc.)
    (-g, /Zi, -s, etc.); empty string when nothing applies.
    """
    if not compiler or not build_type:
        return ""
    compiler_name = str(compiler)
    if compiler_name == 'Visual Studio':
        return '/Zi' if build_type == 'Debug' else ""
    if build_type == 'Debug':
        return '-g'
    if build_type == 'Release' and compiler_name == 'gcc':
        return '-s'
    return ""
def hexStrToInt(inputstr):
    """
    Converts a string with hex bytes to a numeric value.

    Arguments:
        inputstr - A string representing the bytes to convert.
                   Example: 41414141
    Return:
        the numeric value, or 0 when the input cannot be parsed.

    BUG FIX: the bare ``except`` also swallowed KeyboardInterrupt and
    SystemExit; only parsing errors are caught now.
    """
    try:
        return int(inputstr, 16)
    except (TypeError, ValueError):
        # invalid or non-string input: fall back to 0, as before
        return 0
def whilst(b, x):
    """
    Like using a takewhile in comprehensions. It aborts the remainder
    of the iterable.
    But unlike a StopIteration, the remaining other loops continue.
    >>> from itertools import takewhile
    >>> [(x,y) for x in takewhile(lambda x:x<3,range(10))
    ...        for y in takewhile(lambda y:y<2,range(10))]
    [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
    >>> list(In(range(10),lambda x:
    ...     whilst(x<3, In(range(10), lambda y:
    ...         whilst(y<2,((x,y),))))))
    [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
    Notice that y has to be bound twice in the
    list-comprehension/takewhile version, but not using In/whilst.
    >>> [x+y for x in 'abc' for y in takewhile(lambda y: x!=y,'zabc')]
    ['az', 'bz', 'ba', 'cz', 'ca', 'cb']
    >>> list(In('abc',lambda x:
    ...     In('zabc',lambda y:
    ...         whilst(x!=y, (x+y,) ))))
    ['az', 'bz', 'ba', 'cz', 'ca', 'cb']
    This is different than if (or `when` inside `In`), which keeps
    checking
    >>> [x+y for x in 'abc' for y in 'zabc' if x!=y]
    ['az', 'ab', 'ac', 'bz', 'ba', 'bc', 'cz', 'ca', 'cb']

    NOTE(review): this raises StopIteration as a control-flow signal for the
    ``In`` driver. Under PEP 479 (Python 3.7+) a StopIteration escaping into
    a generator frame becomes RuntimeError -- confirm ``In`` is not
    implemented as a generator that lets this exception propagate.
    """
    if b:
        return x
    else:
        raise StopIteration
def remove_digits(integer):
    """Return all numbers obtained by successively removing digits of
    *integer* from the front and from the back (excluding the number
    itself).

    E.g. 3797 -> 797, 97, 7, 379, 37, 3 (not in this order).
    """
    digits = str(integer)
    results = []
    for i in range(1, len(digits)):
        results.append(int(digits[i:]))   # drop the first i digits
        results.append(int(digits[:i]))   # keep only the first i digits
    return results
def crossSet(list1, list2):
    """Return (common elements, non-shared elements) of the two lists.

    Both parts are produced via set operations, so ordering is unspecified
    and duplicates are removed.
    """
    s1, s2 = set(list1), set(list2)
    return list(s1 & s2), list(s1 ^ s2)
def quote_escape(value, lf='&mjf-lf;', quot='&mjf-quot;'):
    """
    Escape a string so that it can safely be quoted. You should use this if
    the value to be quoted *may* contain line-feeds or both single quotes
    and double quotes.

    If the value contains ``\\n`` then it will be escaped using ``lf``. By
    default this is ``&mjf-lf;``.

    If the value contains single quotes *and* double quotes, then all
    double quotes will be escaped using ``quot``. By default this is
    ``&mjf-quot;``.

    >>> quote_escape('hello')
    'hello'
    >>> quote_escape('hello\\n')
    'hello&mjf-lf;'
    >>> quote_escape('hello"')
    'hello"'
    >>> quote_escape('hello"\\'')
    "hello&mjf-quot;'"
    >>> quote_escape('hello"\\'\\n', '&fish;', '&wobble;')
    "hello&wobble;'&fish;"
    """
    escaped = value
    # newline replacement must run first: the lf marker itself may contain
    # quote characters that influence the second check
    if '\n' in escaped:
        escaped = escaped.replace('\n', lf)
    if "'" in escaped and '"' in escaped:
        escaped = escaped.replace('"', quot)
    return escaped
def convertir_tiempo(segundos: int) -> str:
    """Convert a time in seconds to an hh:mm:ss formatted string.

    :param segundos: time in seconds
    :type segundos: int
    :return: time formatted as hh:mm:ss
    :rtype: str
    """
    horas, resto = divmod(segundos, 3600)
    minutos, segs = divmod(resto, 60)
    return f"{horas:02d}:{minutos:02d}:{segs:02d}"
def intersect(lst1, lst2):
    """
    param: lst1 - list
    param: lst2 - list
    return: elements of lst1 that also appear in lst2 (order and
            duplicates taken from lst1)
    """
    lookup = set(lst2)  # O(1) membership tests
    result = []
    for value in lst1:
        if value in lookup:
            result.append(value)
    return result
def format_time_and_value_to_segment_list(time_and_value_list, segments_count, start_timestamp,
                                          end_timestamp, average=False):
    """
    Format time_and_value_list to time segments.

    Parameters
    ----------
    time_and_value_list: list of tuples
        Have to be sorted by time
        Example: [(time, value), (time, value) ...]
    segments_count: integer
        How many segments will be in result
    start_timestamp: number
        Start of the overall time window
    end_timestamp: number
        End of the overall time window
    average: bool
        When True, each segment holds the mean of its values instead of
        their sum

    Returns
    -------
    List of dictionaries
    Example:
    [{'from': time1, 'to': time2, 'value': sum_of_values_from_time1_to_time2}, ...]
    """
    segment_list = []
    time_step = (end_timestamp - start_timestamp) / segments_count
    for i in range(segments_count):
        segment_start_timestamp = start_timestamp + time_step * i
        segment_end_timestamp = segment_start_timestamp + time_step
        # half-open interval [start, end): each point lands in one segment
        value_list = [
            value for time, value in time_and_value_list
            if time >= segment_start_timestamp and time < segment_end_timestamp]
        segment_value = sum(value_list)
        if average and len(value_list) != 0:
            segment_value /= len(value_list)
        segment_list.append({
            'from': segment_start_timestamp,
            'to': segment_end_timestamp,
            'value': segment_value,
        })
    return segment_list
def get_most_common_tuple(lst):
    """Get the mode of a list of tuples (ties broken arbitrarily)."""
    candidates = set(lst)
    return max(candidates, key=lambda item: lst.count(item))
def pick_every_k(l, k):
    """Picks out every k'th element from the sequence, using round()
    when k is not integer.

    The first element picked is at index round(k), i.e. index 0 is never
    included.

    FIX: removed a leftover debug print from the loop body.
    """
    result = []
    x = k
    while round(x) < len(l):
        result.append(l[int(round(x))])
        x += k
    return result
def get_sorted_attributes_list(attributes):
    """
    Gets sorted attributes list.

    Parameters
    ----------
    attributes
        Dictionary of attributes associated with their count
    Returns
    ----------
    listattr
        List of [attribute, count] pairs sorted by count, descending
    """
    pairs = [[name, count] for name, count in attributes.items()]
    pairs.sort(key=lambda pair: pair[1], reverse=True)
    return pairs
def odds_or_evens(my_bool, nums):
    """Return the even numbers from *nums* when my_bool is truthy,
    otherwise the odd numbers."""
    wanted_remainder = 0 if my_bool else 1
    return [num for num in nums if num % 2 == wanted_remainder]
def merge_dicts(*dict_args):
    """
    Given any number of dicts, shallow copy and merge into a new dict;
    precedence goes to key value pairs in latter dicts.

    https://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
    """
    return {key: value for d in dict_args for key, value in d.items()}
def flin(x, a, b):
    """Linear function of x.

    Parameters
    ----------
    x : float or ndarray or list of float
        x values (independent variable)
    a, b : float
        parameters for the model (intercept, slope)

    Returns
    -------
    yy : ndarray of float
        array of y values
    """
    return b * x + a
def super_signature(signatures):
    """ A signature that would break ambiguities

    For each argument position, picks the most specific class across all
    signatures: the class with the longest MRO. All signatures must have
    the same arity.
    """
    n = len(signatures[0])
    assert all(len(s) == n for s in signatures)
    # max(..., key=len)[0]: the longest MRO belongs to the deepest (most
    # specific) class; its first entry is the class itself
    return [max([type.mro(sig[i]) for sig in signatures], key=len)[0]
            for i in range(n)]
def create_dict(keys):
    """Build a dict mapping each key in *keys* to None.

    >>> keys = {'red', 'green', 'blue', 'yellow', 'orange', 'pink', 'black'}
    >>> create_dict(keys)
    {'pink': None, 'red': None, 'black': None, 'green': None, 'yellow': None, 'orange': None, 'blue': None}

    NOTE(review): the doctest's key order depends on set iteration order and
    is not stable across runs; the copy/update dance below relies on
    CPython-internal sizing behavior -- confirm it is still worthwhile.
    """
    d = dict.fromkeys(keys)  # dict is pre-sized to 32 empty slots
    d.update(dict(d))  # This makes room for additional keys and makes the set collision-free.
    return d
def iterate_module_func(m, module, func, converged):
    """Call function func() in specified module (if available) and use the
    result to adjust model convergence status. If func doesn't exist or
    returns None, convergence status will not be changed."""
    iter_func = getattr(module, func, None)
    if iter_func is None:
        return converged
    verdict = iter_func(m)
    if verdict is None:
        # module is not taking a stand on whether the model has converged
        return converged
    return converged and verdict
def roc_name_adjust(varimp_names):
    """
    Cleans up the column names for the variable importance plot for
    publishing.

    NOTE(review): the original mapper listed 'vent_recieved_1.0' twice
    ('oxygen ...' then 'no ventilation ...'); Python keeps the last entry,
    so the effective mapping was 'no ventilation recieved' -- confirm
    intent.
    """
    mapper = {
        'vent_recieved_2.0': 'mechanical ventilation recieved',
        'vent_recieved_1.0': 'no ventilation recieved',
        'pao2fio2ratio': 'PaO2:FiO2',
        'ipco2_>50': 'pCO2 (>50)',
        'ibands_>10': 'bands (>10)',
        'ibands_absent': 'bands (missing)',
    }
    adjusted_names = []
    for element in varimp_names:
        if element in mapper:
            adjusted_names.append(mapper[element])
        elif "_1.0" in element:
            # BUG FIX: str.strip("_1.0") removed any of the characters
            # '_', '1', '.', '0' from BOTH ends (mangling names like
            # 'd101_1.0'); remove the literal suffix instead.
            if element.endswith("_1.0"):
                element = element[:-len("_1.0")]
            adjusted_names.append(element + ' (Y/N)')
        else:
            adjusted_names.append(element)
    return adjusted_names
def pair_sum_in_array(array, x):
    """Check whether a sorted array contains two elements summing to x.

    Classic two-pointer scan from both ends; O(n) time, O(1) space.
    """
    lo, hi = 0, len(array) - 1
    while lo < hi:
        total = array[lo] + array[hi]
        if total == x:
            return True
        if total < x:
            lo += 1
        else:
            hi -= 1
    return False
def mean(values):
    """
    Arithmetic mean of *values*.

    Returns 0.0 for an empty sequence, matching the original
    accumulate-by-parts behaviour (whose loop simply never ran).

    IMPROVEMENT: the original divided every element by len(values)
    individually, performing n divisions and accumulating extra float
    rounding error; a single sum-then-divide is faster and more accurate.
    """
    if not values:
        return 0.0
    return sum(values) / len(values)
def without_score(contribution):
    """
    Return a copy of *contribution* with the "score" key removed.
    """
    return {key: value for key, value in contribution.items() if key != "score"}
def two_dec(num: float) -> str:
    """Format a floating point number with 2 decimal places."""
    return f"{num:.2f}"
def format_str_for_write(input_str: str) -> bytes:
    """Format a string for writing to SteamVR's stream.

    The result is UTF-8 encoded and guaranteed to end with a newline,
    except for the empty string which stays empty.
    """
    if not input_str:
        return b""
    if input_str.endswith("\n"):
        return input_str.encode("utf-8")
    return (input_str + "\n").encode("utf-8")
def pair_interactions(x):
    """Pair interacting nodes: join fields 0 and 3 of a whitespace-split
    line with an underscore."""
    fields = x.split()
    return "{0}_{1}".format(fields[0], fields[3])
def goal_priors(T):
    """
    Return the prior probability of each goal given no other information.

    T is an iterable of (item, goal) pairs; the prior of a goal is its
    relative frequency among the pairs.
    """
    counts = {}
    for _, goal in T:
        counts[goal] = counts.get(goal, 0) + 1
    total = len(T)
    return {goal: count / total for goal, count in counts.items()}
def get_middle_opt(string: str) -> str:
    """Get middle value of a string (efficient).

    Returns the single middle character for odd lengths, the middle two
    characters for even lengths.

    Examples:
        >>> assert get_middle_opt('middle') == 'dd'
    """
    mid = len(string) // 2
    if len(string) % 2:
        return string[mid]
    return string[mid - 1 : mid + 1]
def handle_response(response):
    """Urllib returns different types in Python 2 and 3 (str vs bytes);
    normalize the response to str."""
    if isinstance(response, str):
        return response
    return response.decode('utf-8')
def get_observation_variation_categories(objects):
    """
    Obtain categories by which, for a given scenario and partition, ie, a decoupled subsystem
    produce variation in quantitative observations, not in the system structure.
    The most important ones are TIME and OBSERVER

    NOTE(review): the docstring says OBSERVER but the function returns
    "SOURCE" -- confirm which is correct. *objects* is currently unused.

    :param objects: unused
    :return: list of category names
    """
    return ["TIME", "SOURCE"]
def symmetric_difference(left, right):
    """:yaql:symmetricDifference

    Returns symmetric difference of left and right sets as a new set
    (elements present in exactly one of the two sets).

    :signature: left.symmetricDifference(right)
    :receiverArg left: left set
    :argType left: set
    :arg right: right set
    :argType right: set
    :returnType: set

    .. code::

        yaql> set(0, 1, 2).symmetricDifference(set(0, 1, 3))
        [2, 3]
    """
    return left.symmetric_difference(right)
def flatten_acfg_list(acfg_list):
    """
    Returns a new config list where subconfig params are prefixed by their
    subconfig keys (e.g. {'a': {'x': 1}} -> {'a_x': 1}).
    """
    flattened = []
    for acfg in acfg_list:
        flat = {}
        for prefix, subdict in acfg.items():
            for key, val in subdict.items():
                flat[prefix + '_' + key] = val
        flattened.append(flat)
    return flattened
def relu(x):
    """
    :math:`f(x) =` x if x is greater than 0, else 0
    (See `<https://en.wikipedia.org/wiki/Rectifier_(neural_networks)>`_ .)

    FIX: removed the unreachable ``raise NotImplementedError`` placeholder
    that followed the return statement.
    """
    return x if x > 0 else 0
def concatenateLines(oldcontent):
    """ of all lines in the list "oldcontent", concatenate the ones
    that end with \\ or , """
    content = []  # concatenate lines that end with "," or "\"
    tmp = ""  # accumulator for the current (possibly continued) line
    for line in oldcontent:
        tmp += line.strip()
        if tmp != "" and tmp[-1] not in [",", '\\']:
            # no continuation marker: flush the accumulated line
            content.append(tmp)
            tmp = ""
        if tmp != "" and tmp[-1] == '\\':
            tmp = tmp[:-1]  # remove trailing \ (but keep trailing ,)
    # NOTE(review): a trailing unfinished continuation (tmp non-empty after
    # the loop) is silently dropped -- confirm that is intentional.
    return content
def boldheader(title):
    """Convert the given string into a bold (markdown) string, prefixed and
    followed by blank lines."""
    return "\n\n**{}**\n\n".format(str(title).strip())
def merge_v2(intervals):
    """Merge overlapping intervals given as a list of [start, end] lists.

    Note: sorts and consumes the input list in place, like the original.
    """
    if intervals is None:
        return None
    intervals.sort(key=lambda interval: interval[0])
    merged = [intervals.pop(0)]
    for interval in intervals:
        last = merged[-1]
        if last[-1] >= interval[0]:
            # overlapping: extend the previous interval
            last[-1] = max(last[-1], interval[-1])
        else:
            merged.append(interval)
    return merged
def validate_type(value):
    """Validate "type" parameter; returns it unchanged when valid."""
    allowed = ("default", "command")
    if value in allowed:
        return value
    raise ValueError('Must be "default" or "command"')
def run_all_analysis_for_a_clustering(clustering_id, clustering, analysis):
    """
    Is the function to be run in parallel.

    @param clustering_id: Is the id of the clustering we are working with.
    @param clustering: A Clustering instance.
    @param analysis: A list of all the analysis we want to perform.
    @return: (clustering_id, {analysis_name: result}) tuple.
    """
    results = {a.name: a.run(clustering) for a in analysis}
    return (clustering_id, results)
def combineHv(x1, x2):
    """ Combine/Reduce function for the mappers.

    Parameters
    ----------
    x1 : list
        element1
    x2 : list
        element2

    Returns
    -------
    tuple
        ``(x1[0]+x2[0], x1[1]+x2[1])``
    """
    first = x1[0] + x2[0]
    second = x1[1] + x2[1]
    return (first, second)
def depth(n):
    """
    Compute the depth of node n: the number of nodes on the chain from n
    up through successive ``parent`` links until None (0 for n is None).
    """
    count = 0
    node = n
    while node is not None:
        node = node.parent
        count += 1
    return count
def get_auth_type_from_header(header):
    """
    Given a WWW-Authenticate or Proxy-Authenticate header, returns the
    authentication type to use. We prefer NTLM over Negotiate if the server
    supports it. Returns None when neither is offered.
    """
    lowered = header.lower()
    if "ntlm" in lowered:
        return "NTLM"
    if "negotiate" in lowered:
        return "Negotiate"
    return None
def scalar_function(x, y):
    """
    Returns the f(x, y) defined in the problem statement:
    x*y when x <= y, otherwise x/y.
    """
    return x * y if x <= y else x / y
def write_kpointfile(ini0):
    """
    Take a dictionary with all the relevant information for the
    structure, extract the Kpoints, and write the data to the
    kpointfile ("kpoints" in the current directory).

    BUG FIX: the file handle was never closed on an exception; a context
    manager now guarantees closure.
    """
    labels = ini0["kpoint_label"]
    length = len(labels)
    with open("kpoints", 'w') as kpointfile:
        kpointfile.write("\n")
        kpointfile.write("frac\n")
        kpointfile.write("%5i\n" % (length))
        for ii in range(length):
            kpointfile.write("%5i %12.8f %12.8f %12.8f %s\n" %
                             (ii + 1, ini0['kpoint'][ii][0], ini0['kpoint'][ii][1],
                              ini0['kpoint'][ii][2], labels[ii]))
    return None
def Get_Human_Readable(size, precision=2):
    """Format a byte count with a binary-magnitude suffix (B..TB).

    http://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
    """
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
    index = 0
    while size > 1024 and index < 4:
        size = size / 1024.0
        index += 1
    return "%.*f%s" % (precision, size, suffixes[index])
def nl2br(s):
    """Replace each newline in *s* with an HTML ``<br/>`` tag.

    >>> nl2br('abc\nxyz')
    'abc<br/>xyz'
    """
    return s.replace('\n', '<br/>')
def get_batch_series(source):
    """Extracts sim_batch and series values from full source string.

    e.g.: sim10_1 would return (10, 1); sim5_2 returns (5, 2).

    Args:
        source: String of the form "sim<batch>_<series>".

    Returns:
        tuple: (sim_batch, series) as ints.
    """
    # Split on the underscore instead of slicing at fixed offsets: the
    # old code assumed a two-digit batch number and raised ValueError on
    # inputs such as "sim5_2" or "sim100_1".
    stripped = source.strip('sim')
    batch_str, series_str = stripped.split('_')
    return int(batch_str), int(series_str)
def convert_dtw_struc_dist(distances, start_layer=1):
    """
    Convert per-layer DTW distances into cumulative distances.

    For each vertex pair, layer values from start_layer upward are
    turned into running sums (each layer adds the previous layer's
    accumulated value); layers below start_layer keep their raw values.

    :param distances: dict of dict — maps vertex pair -> {layer: distance}
    :param start_layer: first layer index to accumulate from
    :return: the same dict, mutated in place with cumulative values
    """
    for vertices, layers in distances.items():
        keys_layers = sorted(layers.keys())
        # NOTE(review): start_layer is reassigned here, so a pair with
        # fewer layers lowers it for every later pair in the iteration —
        # confirm this carry-over between vertex pairs is intentional.
        start_layer = min(len(keys_layers), start_layer)
        # Discard the layers below start_layer from the accumulation.
        for layer in range(0, start_layer):
            keys_layers.pop(0)
        # Running sum: each remaining layer adds its predecessor's total.
        for layer in keys_layers:
            layers[layer] += layers[layer - 1]
    return distances
def is_first_char(input_string, char):
    """ INPUT:
        input_string = string of any length, typically a line in a file
                       being parsed (surrounding whitespace is ignored)
        char = string of the character we want to compare with
    RETURN:
        True  ; if char equals the first character after stripping
        False ; otherwise, including when the stripped string is empty
    """
    trimmed = input_string.strip()  # ignore surrounding whitespace
    return bool(trimmed) and trimmed[0] == char
def getFileExtension(f) :
    """Gets the file extension, and returns it (in all lowercase).
    Returns None if the file has no extension, i.e. no dot after the
    last '/' separator.
    Keyword arguments:
    f - file name possibly with path
    """
    dot = f.rfind(".")
    # No dot at all, or the last dot belongs to a directory component.
    if dot < 0 or f.rfind("/") >= dot:
        return None
    return f[dot + 1:].lower()
def _simplify_ast(raw_ast):
"""Simplify an AST that comes out of the parser
As well as replacing pyparsing's ParseResults with bare lists, this merges
adjacent non-condition words. For example, "a b" parses to ["a", "b"]. This
function merges that to ["a b"].
The idea is that this will be much more efficient to match against tags for
the vast majority of ASTs, which have many more raw words than they have
conditions.
A simplified AST is a list whose items are strings (representing bare
words) or tuples of the form (negated, flag, ast), where negated is a bool,
flag is a string and ast is another simplified ast.
"""
children = []
str_acc = []
for expr in raw_ast:
if isinstance(expr, str):
str_acc.append(expr)
continue
# We have a term that isn't a string. This must be a conditional. Join
# together any items in str_acc and add them to children then recurse
# to simplify the conditional's sub-expression.
if str_acc:
children.append(" ".join(str_acc))
str_acc = []
negated, flag, exprs = expr
children.append((negated, flag, _simplify_ast(exprs)))
if str_acc:
children.append(" ".join(str_acc))
return children |
def _generate_filepath(
window_size, step, correlation_method, sequential, outdir, kind, **
kwargs):
"""Computes a filepath for the reward csv file that allows us to identify
the properties of the experiment.
Args:
window_size (int): Size of the sliding window
step (int): Step of the sliding window
correlation_method (string): One of 'pearson',
sequential (bool): If the experiment is for the sequential or concurrent
metrics data.
outdir (string): Directory where to write the csv file
kind (string): One of 'continous', 'top' or 'threshold'
Returns:
string: The filepath of the reward csv file
"""
seq_or_con = 'seq' if sequential else 'con'
if kind == 'top':
kind += '_' + str(kwargs['L'])
elif kind == 'threshold':
kind += '_' + str(kwargs['threshold'])
filepath = (
"%s%s_rewards_w%d_s%d_%s_%s.csv" %
(outdir, seq_or_con, window_size, step, correlation_method, kind))
return filepath |
def get_breakout_payload(device_id, breakout_type, interface_id):
    """
    Build the job payload for a port breakout configuration request.

    :param device_id: device id
    :param breakout_type: requested breakout type
    :param interface_id: port number with service tag
    :return: json
    """
    params = [
        {"Key": "breakoutType", "Value": breakout_type},
        {"Key": "interfaceId", "Value": interface_id},
        {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"},
    ]
    targets = [{
        "JobId": 0,
        "Id": device_id,
        "Data": "",
        "TargetType": {"Id": 4000, "Name": "DEVICE"},
    }]
    return {
        "Id": 0,
        "JobName": "Breakout Port",
        "JobDescription": "",
        "Schedule": "startnow",
        "State": "Enabled",
        "JobType": {"Id": 3, "Name": "DeviceAction_Task"},
        "Params": params,
        "Targets": targets,
    }
def Decimal2Binary(dec_num):
    """ Return the binary representation of dec_num (a non-negative int).

    The previous recursive version prefixed every positive result with a
    spurious '0' (e.g. 5 -> '0101') and could hit the recursion limit on
    large inputs; this version returns the canonical form (5 -> '101').
    """
    # NOTE(review): negative inputs previously recursed forever and are
    # still unsupported — callers must pass dec_num >= 0.
    if dec_num == 0:
        return '0'
    return bin(dec_num)[2:]
def execute(command):
    """
    Executes a command on the local host.

    :param str command: the command to be executed
    :return: returns the output of the STDOUT or STDERR
    """
    from subprocess import check_output, STDOUT

    # Appending "; exit 0" forces a zero exit status so check_output
    # never raises CalledProcessError; stderr is folded into the return.
    # SECURITY NOTE: shell=True — never pass untrusted input here.
    return check_output("{}; exit 0".format(command), stderr=STDOUT,
                        shell=True)
def signed_h(num:int) -> int:
    """Interpret *num* as a 16-bit two's-complement value (struct fmt 'h')
    and return the signed result.
    """
    low16 = num & 0xffff
    # Flipping the sign bit and subtracting 0x8000 maps 0x8000..0xffff
    # onto -32768..-1 while leaving 0..0x7fff unchanged.
    return (low16 ^ 0x8000) - 0x8000
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.