content stringlengths 42 6.51k |
|---|
def my_evalf(expr, chop=False):
    """Numerically evaluate a sympy expression, or a list of them.

    Any failure of ``evalf`` is swallowed and the input is returned
    unchanged, so callers never have to guard against eval errors.
    """
    if isinstance(expr, list):
        try:
            return [item.evalf(chop=chop) for item in expr]
        except Exception:  # pylint: disable=broad-except
            return expr
    try:
        return expr.evalf(chop=chop)
    except Exception:  # pylint: disable=broad-except
        return expr
def city_country(city, country):
    """Format a title-cased city/country pair, e.g. 'Santiago, Chile'."""
    return "{}, {}".format(city.title(), country.title())
def generate_control_variables(value):
    """Generate some set values for control variables to test against."""
    alarm_limits = {
        'lowAlarmLimit': 2,
        'lowWarningLimit': 3,
        'highAlarmLimit': 10,
        'highWarningLimit': 8,
    }
    return {
        'value': value,
        'valueAlarm': alarm_limits,
        'alarm': {'severity': 0},
        'display': {'units': 'mV'},
        'control': {'limitLow': 1, 'limitHigh': 11},
    }
def re_ordering(s):
    """Move every word containing an uppercase letter to the front.

    Capitalized words appear first, in reverse order of appearance
    (each found word was pushed to index 0), followed by the remaining
    words in their original order.

    Fixes two defects in the original implementation: the word list was
    mutated while being iterated over (which skipped elements), and
    ``list.index`` located the wrong occurrence for duplicate words.
    """
    words = s.split()
    has_upper = lambda w: any(ch.isupper() for ch in w)
    capitalized = [w for w in words if has_upper(w)]
    plain = [w for w in words if not has_upper(w)]
    # insert(0, ...) per hit is equivalent to reversed order of appearance
    return " ".join(list(reversed(capitalized)) + plain)
def calc_finesse(FSR, FWHM):
    """
    Return the Fabry-Perot finesse, FSR / FWHM.

    Parameters
    ----------
    FSR (float) : free-spectral-range in BCV or A
    FWHM (float) : full-width-at-half-maximum in BCV or A

    Returns
    -------
    F (float) : the finesse

    Both quantities must be in the same units. Inputs are coerced to
    float, so numeric strings are accepted.
    """
    free_spectral_range = float(FSR)
    full_width = float(FWHM)
    return free_spectral_range / full_width
def get_d_pair(n, cond1, cond2):
    """Paired t-statistic over the first ``n`` paired observations.

    Computes the mean of the pairwise differences divided by the
    standard error of that mean (sample standard deviation / sqrt(n)).
    """
    diffs = [a - b for a, b in zip(cond1[:n], cond2[:n])]
    mean_diff = sum(diffs) / n
    sq_sum = sum(d * d for d in diffs)
    sample_sd = ((sq_sum - n * mean_diff ** 2) / (n - 1)) ** 0.5
    std_error = sample_sd / n ** 0.5
    return mean_diff / std_error
def authInsert(user, role, group, site):
    """
    Authorization function for general insert.

    Grants access when no role restriction is given, or when one of the
    user's roles appears in the colon-separated role list for any of
    that role's groups.

    Note: ``group`` and ``site`` are accepted for interface
    compatibility but are not consulted by this check.
    """
    if not role:
        return True
    # Fixed: dict.iteritems() is Python 2 only; the rest of this file is
    # Python 3 (f-strings), so use items().
    for user_role, attrs in user['roles'].items():
        for grp in attrs['group']:
            if user_role in role.get(grp, '').split(':'):
                return True
    return False
def validate_connection_providertype(connection_providertype):
    """
    Validate ProviderType for Connection
    Property: Connection.ProviderType

    Returns the value unchanged when valid; raises ValueError otherwise.
    """
    VALID_CONNECTION_PROVIDERTYPE = ["Bitbucket", "GitHub", "GitHubEnterpriseServer"]
    if connection_providertype in VALID_CONNECTION_PROVIDERTYPE:
        return connection_providertype
    raise ValueError(
        "Connection ProviderType must be one of: %s"
        % ", ".join(VALID_CONNECTION_PROVIDERTYPE)
    )
def andify(list_of_strings):
    """
    Given a list of strings will join them with commas
    and a final "and" word.

    >>> andify(['Apples', 'Oranges', 'Mangos'])
    'Apples, Oranges and Mangos'

    Fixed: the original located the last comma of the joined text with
    rfind, which misplaced the "and" whenever an item itself contained
    a comma; now the last item is joined explicitly.
    """
    if len(list_of_strings) < 2:
        return ', '.join(list_of_strings)
    return ', '.join(list_of_strings[:-1]) + ' and ' + list_of_strings[-1]
def decimalToFloat(obj):
    """Recursively replace Decimal values inside nested lists/dicts with floats.

    Mutates ``obj`` in place and also returns it. Tuples are traversed
    but, being immutable, are left unchanged; only exact ``list``
    instances (not subclasses) have their elements rewritten.

    Parameters
    ----------
    obj : list | dict
        Container possibly holding ``decimal.Decimal`` values.
    """
    from decimal import Decimal
    if type(obj) == list:
        for idx, entry in enumerate(obj):
            if type(entry) == Decimal:
                obj[idx] = float(entry)
            elif type(entry) in (list, dict, tuple):
                decimalToFloat(entry)
    elif isinstance(obj, dict):
        for k in obj:
            v = obj[k]
            if type(v) == Decimal:
                obj[k] = float(v)
            elif isinstance(v, (list, dict)):
                decimalToFloat(v)
    return obj
def remove_list_duplicates(input):
    """Return a new list with duplicates removed, keeping first occurrences.

    Uses a linear membership test rather than a set, so unhashable
    elements (e.g. nested lists) are supported.
    """
    unique = []
    for element in input:
        if element not in unique:
            unique.append(element)
    return unique
def h(p1, p2):
    """Manhattan (L1) distance between two 2-D points."""
    (x1, y1), (x2, y2) = p1, p2
    dx = x1 - x2
    dy = y1 - y2
    return abs(dx) + abs(dy)
def NonetoemptyList(XS):
    """Return [] when XS is None; otherwise return XS unchanged.

    @args XS: possibly-None value
    """
    if XS is None:
        return []
    return XS
def binary_search(items_list, item,
                  recursive=False, first_ptr=-1, last_ptr=-1):
    """
    Searches the list 'items_list' for an item 'item'
    using binary search algorithm
    >>> from pydsa import binary_search
    >>> a = [1, 2, 7, 9, 10, 33, 56, 70, 99]
    >>> binary_search(a, 9)
    3
    >>> binary_search(a, 13)
    -1
    >>> binary_search(a, 13, True)
    -1
    >>> binary_search(a, 56, True)
    6
    >>> binary_search(a, 56, True, 0, 5)
    -1
    Binary Search
    Time Complexity:
        Average - O(log(n))
        Best - O(1)
        Worst - O(log(n))
    Return value: Index where element exists
                  -1 if element not found
    Input parameters: [items_list # List from which to search for item,
                       item # Item to search for,
                       recursive = True,
                       first_ptr = 0 # start of range inclusive,
                       last_ptr = len(items_list) # end of range inclusive]
    List provided should be sorted.
    """
    # The (-1, -1) sentinel pair means "search the whole list".
    if first_ptr == last_ptr == -1:
        first_ptr = 0
        last_ptr = len(items_list) - 1
    if recursive:
        if last_ptr < first_ptr:
            return -1  # element not found.
        else:
            # Overflow-safe midpoint form (first + (last-first)//2).
            mid_ptr = first_ptr + ((last_ptr - first_ptr) // 2)
            if item == items_list[mid_ptr]:
                return mid_ptr
            elif item < items_list[mid_ptr]:
                # Target is in the lower half.
                return binary_search(items_list, item,
                                     True, first_ptr, mid_ptr - 1)
            else:  # item > items_list[mid_ptr]
                return binary_search(items_list, item,
                                     True, mid_ptr + 1, last_ptr)
    else:
        found = False
        index = -1  # element not found
        # NOTE(review): the iterative branch resets the pointers, so it
        # always searches the whole list; only recursive=True honours a
        # caller-supplied first_ptr/last_ptr range (see the doctest
        # binary_search(a, 56, True, 0, 5) == -1).
        first_ptr = 0
        last_ptr = len(items_list) - 1
        while first_ptr <= last_ptr and not found:
            midpoint = (first_ptr + last_ptr) // 2
            if items_list[midpoint] == item:
                found = True
                # list.index returns the FIRST occurrence, which may
                # differ from midpoint when duplicates are present.
                index = items_list.index(item)
            else:
                if item < items_list[midpoint]:
                    last_ptr = midpoint - 1
                else:
                    first_ptr = midpoint + 1
        return index
def update(config1, config2):
    """Merge two section->settings config dicts without losing fields.

    The result is the union of settings from both configs, with
    settings from config2 overriding those in config1 inside
    overlapping sections. Neither original config dictionary is
    modified.

    Fixed: the original only shallow-copied config1, so updating an
    overlapping section mutated config1's nested section dict in place,
    contradicting this contract.
    """
    merged = {section: dict(settings) for section, settings in config1.items()}
    for section, settings in config2.items():
        if section in merged:
            merged[section].update(settings)
        else:
            merged[section] = settings
    return merged
def strFromPath(path):
    """
    Render a key/index path as the Python subscript code needed to
    retrieve the value, e.g. ['a', 0] -> "['a'][0]".
    """
    pieces = []
    for step in path:
        if type(step) == str:
            pieces.append("['{}']".format(step))
        elif type(step) == int:
            pieces.append("[{}]".format(step))
        else:
            # non-str/int steps are passed through unchanged
            pieces.append(step)
    return ''.join(pieces)
def handle_quota_spec(quota):
    """
    Process an arbitrary quota specification.

    The spec is a string of the form '[OPERATION]AMOUNT' where
    OPERATION is one of '=', '+' or '-' and defaults to '=' when
    absent. Returns the (operation, amount) pair.
    """
    valid_operations = ('=', '+', '-')
    first = quota[0]
    if first in valid_operations:
        return (first, quota[1:])
    return ('=', quota)
def _get_added_comment_id(src):
"""Returns comment ID from given request."""
if not src:
return None
actions = src.get('actions') or {}
related = actions.get('add_related') or []
if not related:
return None
related_obj = related[0]
if related_obj.get('type') != 'Comment':
return None
return related_obj.get('id') |
def perc_bounds(percent_filter):
    """
    Convert a +/- percentage into decimal multiplier bounds.

    Parameters
    ----------
    percent_filter : float or tuple
        Percentage, or (low, high) tuple of percentages, used to filter
        around reporting irradiance in the irr_rc_balanced function.

    Returns
    -------
    tuple
        (low, high) decimal bounds; passing 20 returns (0.8, 1.2).
    """
    if isinstance(percent_filter, tuple):
        low_pct, high_pct = percent_filter[0], percent_filter[1]
    else:
        low_pct = high_pct = percent_filter
    return (1 - low_pct / 100, 1 + high_pct / 100)
def gridIndexToSingleGridIndex(ix, iy, iz, nx, ny, nz):
    """
    Flatten a 3-D grid index to a single index (x varies fastest).

    :param ix, iy, iz: (int) grid index in x-, y-, z-axis direction
    :param nx, ny, nz: (int) number of grid cells in each direction
        (nz is unused but kept for a symmetric signature)
    :return: i: (int) single grid index

    Note: ix, iy, iz can be ndarrays of the same shape, in which case
    the result is an ndarray of that shape.
    """
    return (iz * ny + iy) * nx + ix
def get_party_info(info):
    """
    Parse 'Name <email>' style text and return (name, email).

    Returns None for empty input; email is None when the '<...>'
    pattern is absent.
    """
    if not info:
        return
    if '@' in info and '<' in info:
        cleaned = info.strip().strip(',').strip('>')
        parts = cleaned.split('<')
        name = parts[0].strip()
        email = parts[1].strip()
    else:
        name, email = info, None
    return name, email
def init_nested_dict_zero(sector, first_level_keys, second_level_keys):
    """Initialise a nested dictionary of zeros keyed by sector.

    Arguments
    ----------
    sector : hashable
        Middle-level key inserted under every first-level key
    first_level_keys : list
        First level keys
    second_level_keys : list
        Keys of the innermost dicts, all initialised to 0

    Returns
    -------
    nested_dict : dict
        {first_key: {sector: {second_key: 0, ...}}, ...}
    """
    return {
        first: {sector: {second: 0 for second in second_level_keys}}
        for first in first_level_keys
    }
def sucrose(inverted_sugar_pre_hidrolisys, inverted_sugar_post_hidrolisys):
    """
    Calculate the sucrose percentage in honey.

    The increase in inverted sugar caused by hydrolysis, multiplied by
    0.95 (the inverted-sugar-to-sucrose conversion factor), gives the
    sucrose content.
    """
    inverted_sugar_from_sucrose = (
        inverted_sugar_post_hidrolisys - inverted_sugar_pre_hidrolisys
    )
    return inverted_sugar_from_sucrose * 0.95
def fallingfactorial(n, m):
    """
    Return the falling factorial; n to the m falling, i.e. n(n-1)..(n-m+1).

    For Example:
    >>> fallingfactorial(7, 3)
    210
    """
    result = 1
    factor = n
    while factor > n - m:
        result *= factor
        factor -= 1
    return result
def calc_growing_media(total_sales):
    """Calculate Growing Media costs.

    Args:
        total_sales (list): The total sales as an annual time series
    Returns:
        list: Cost of Goods Sold expenditure on Growing Media for each year
    To Do:
        Currently estimated as 2.5% of sales; should be made dependent on
        the growing media selected and, like packaging, consider economic
        order quantity.
    """
    GROWING_MEDIA_FRACTION = 0.025  # estimated 2.5% of sales
    # scenario.growing_media * price_of_media * no_of_plants
    return [sales * GROWING_MEDIA_FRACTION for sales in total_sales]
def secs_to_string(secs):
    """Render a number of seconds as 'D days, H hours, M minutes'."""
    parts = []
    for label, interval in (('day', 86400), ('hour', 3600), ('minute', 60)):
        amount = int(secs / interval)
        secs %= interval
        suffix = u'' if amount == 1 else u's'
        parts.append(u"%d %s%s" % (amount, label, suffix))
    return ', '.join(parts)
def getL(s):
    """Map a shell symbol to its angular-momentum L code, or -1.

    Single letters map S->0, P->1, D->2, F->3, ... Z->20 (all-upper or
    all-lower spelling only), and 'SP'/'sp' maps to the special code
    100. Codes above 20 are written as 'L' followed by the number
    (e.g. 'L21' -> 21). Anything unrecognised returns -1.

    Improvement: the original 30-branch if/elif chain is replaced by a
    lookup table with identical results.
    """
    codes = {'S': 0, 'P': 1, 'SP': 100, 'D': 2, 'F': 3, 'G': 4, 'H': 5,
             'I': 6, 'K': 7, 'L': 8, 'M': 9, 'N': 10, 'O': 11, 'Q': 12,
             'R': 13, 'T': 14, 'U': 15, 'V': 16, 'W': 17, 'X': 18,
             'Y': 19, 'Z': 20}
    # Accept only the all-upper and all-lower spellings ('Sp' stays -1),
    # matching the original exact-match comparisons.
    table = {}
    for sym, code in codes.items():
        table[sym] = code
        table[sym.lower()] = code
    if s in table:
        return table[s]
    # If L > 20, the symbol is written as 'L' + number; check for that.
    if s[0] == "L" and len(s) > 1 and s[1:].isdigit():
        return int(s[1:])
    # not an angular momentum symbol
    return -1
def find_line_eq(Z1, Z2):
    """Find the line equation (Ax + By = C) through two points.

    Parameters
    ----------
    Z1 : complex
        Complex coordinate of a point on the line
    Z2 : complex
        Complex coordinate of another point on the line

    Returns
    -------
    A, B, C : (float, float, float)
        Line equation parameters
    """
    A = Z1.imag - Z2.imag
    B = Z2.real - Z1.real
    # C below already carries the sign the caller expects
    # (the cross-product term negated).
    C = Z2.real * Z1.imag - Z1.real * Z2.imag
    return (A, B, C)
def tuplify_json_list(list_object: list) -> tuple:
    """
    Convert a list (with list sub-elements) into a tuple of tuples.

    JSON.dump() stores tuples as JSON lists; tuples are the preferred
    input type for py2neo, so lists one level down are re-tupled:
    [[a, b], [c, d]] -> ((a, b), (c, d)). Non-list elements are kept
    as-is.

    :param list_object: A list with sub-lists.
    :return: A tuple version of the list object.
    """
    return tuple(
        tuple(element) if isinstance(element, list) else element
        for element in list_object
    )
def orthogonal_vector(vector):
    """Return a vector of equal length perpendicular to *vector*.

    Returns
    ------
    (float, float): the input rotated 90 degrees ((x, y) -> (-y, x)).
    """
    x, y = vector[0], vector[1]
    return -1 * y, x
def get_app_or_pod_id(app_or_pod):
    """Gets the app or pod ID from the given app or pod definition.

    :param app_or_pod: app or pod definition (dict-like)
    :return: the 'id' of the 'app' entry, falling back to 'pod';
        None when neither is present
    :rtype: str
    """
    container = app_or_pod.get('app', app_or_pod.get('pod', {}))
    return container.get('id')
def extractLats(listoftups):
    """Given a sequence of tuples whose first two elements are
    longitude and latitude, return the list of latitudes (element 1 of
    each tuple)."""
    return [item[1] for item in listoftups]
def pf_mobility(phi, gamma):
    """Phase field mobility function: currently constant, equal to gamma.

    ``phi`` is accepted for interface compatibility. Alternative
    phi-dependent forms kept for reference:
        gamma * (phi**2 - 1.)**2
        0.75 * gamma * max_value(1. - phi**2 + 0.0001, 0.)
    """
    return gamma
    # Function to control PF mobility over time.
def parse_album_header(album_header):
    """Split an album header into its name and year.

    Examples::
        >>> parse_album_header('His Young Heart (2011)')
        ('His Young Heart', '2011')

    Args:
        album_header (string): album header / title to split
    Returns:
        tuple: album name and year (year is None when there is no
        parenthesised part).
    """
    head, sep, tail = album_header.rpartition('(')
    if sep:
        return head.strip(), tail.replace(')', '').strip()
    return album_header.strip(), None
def modular_inverse(a, p):
    """Find the modular inverse: b such that (a * b) % p == 1.

    Returns None when no inverse exists (a and p not coprime, or
    p <= 1), matching the original brute-force behaviour.

    Improvement: uses the built-in three-argument ``pow`` (extended
    Euclidean algorithm, O(log p)) instead of scanning all of
    range(1, p), which was O(p).
    """
    if p <= 1:
        # The scan over range(1, p) never found an inverse for p <= 1;
        # pow(a, -1, 1) would return 0 instead, so guard explicitly.
        return None
    try:
        return pow(a, -1, p)
    except ValueError:
        # a is not invertible modulo p
        return None
def color_red(text):
    """
    Applies the terminal control sequence for red color to *text*.
    """
    return f"\033[31m{text}\033[0m"
def _make_pointer_increments(name, nargs, sig):
"""Increments array pointers following each function call."""
inclist = ""
for i in range(nargs):
if sig[i] in "vpf":
inclist += "++%s%d; " % (name, i,)
return inclist |
def bytes2snap(nof_bytes: int) -> str:
    """
    Convert a byte count into snap-compatible Java option form.

    .. code-block:: python
        >>> bytes2snap(32000)
        '31K'

    Args:
        nof_bytes (int): Byte nb
    Returns:
        str: human-readable size with a binary suffix ('B' below 1K)
    """
    symbols = ("K", "M", "G", "T", "P", "E", "Z", "Y")
    for power, sym in reversed(list(enumerate(symbols, start=1))):
        unit = 1 << (power * 10)
        if nof_bytes >= unit:
            return "%s%s" % (int(nof_bytes / unit), sym)
    return "%sB" % nof_bytes
def job_dfn_list_dict(job_dict):
    """Wrap a single job definition in the {"jobs": [...]} list form."""
    job_list = [job_dict]
    return {"jobs": job_list}
def get_value(values, keys: list, default=None):
    """
    Return a value from nested dicts by walking the given key hierarchy.

    Ex:
        val_map = {'one': {'two': 123}}
        get_value(val_map, ['one', 'two']) returns 123

    @param values: json object
    @param keys: list of keys from the hierarchy tree
    @param default: value to return if a key is missing (or keys is empty)
    @return: the leaf value if every key exists, otherwise default
    """
    if values is None:
        return default
    node = values
    for key in keys:
        if key not in node:
            return default
        node = node[key]
    # An empty key list yields the default, never the root object.
    return node if keys else default
def _GetOpIds(ops):
"""Returns C{OP_ID} for all opcodes in passed sequence.
"""
return sorted(opcls.OP_ID for opcls in ops) |
def split_and_strip(input_string, delim=","):
    """Split *input_string* on *delim* and strip whitespace from each piece.

    Always returns a list (empty for falsy input).

    Fixed: the original returned a lazy ``map`` object on the non-empty
    path but a ``list`` for empty input; a list is now returned
    consistently, which is backward-compatible for iterating callers.
    """
    if not input_string:
        return []
    return [piece.strip() for piece in input_string.split(delim)]
def _split_text_into_n_parts(n, text, output_dir):
"""
Splits text into `n` approximately equal parts.
The splitting is permformed only at paragraphs' boundaries.
"""
l = len(text) // n
texts = []
cur_part_start = 0
for i in range(len(text)):
if i >= cur_part_start + l and text[i] == text[i+1] == '\n':
texts.append(text[cur_part_start:i+2])
cur_part_start = i + 2
texts.append(text[cur_part_start:])
return texts |
def get_prefix(n, factor=1024, prefixes=None):
    """Get magnitude prefix for number.

    Repeatedly divides *n* by *factor* while it is at least *factor*
    and prefixes remain; returns (scaled_value, prefix).
    """
    if prefixes is None:
        prefixes = ('',) + tuple('kMGTPEZY')
    while abs(n) >= factor and len(prefixes) > 1:
        n = n / factor
        prefixes = prefixes[1:]
    return n, prefixes[0]
def _break_long_text(text, maximum_length=75):
    """
    Breaks into lines of 75 character maximum length that are terminated by a backslash.
    """
    def next_line(remaining_text):
        # Returns a line and the remaining text
        if '\n' in remaining_text and remaining_text.index('\n') < maximum_length:
            # An existing newline within the limit is used as the break.
            i = remaining_text.index('\n')
            # NOTE(review): [i+2:] skips the character AFTER the newline
            # as well — presumably the input uses two-character line
            # endings; confirm this is intended for '\n'-only text.
            return remaining_text[:i+1], remaining_text[i+2:]
        elif len(remaining_text) > maximum_length and ' ' in remaining_text:
            # Too long: break at the last space within the limit and
            # append a backslash continuation.
            i = remaining_text[:maximum_length].rfind(' ')
            # NOTE(review): [:i+1] keeps the space while [i+2:] drops
            # one character after it — verify no content loss is intended.
            return remaining_text[:i+1] + '\\\n', remaining_text[i+2:]
        else:
            # Short enough (or unbreakable): emit as the final line.
            return remaining_text, ''
    remaining_text = text
    lines = []
    while remaining_text:
        line, remaining_text = next_line(remaining_text)
        lines += [line]
    return ''.join(lines)
def identity(x):
    """Return *x* unchanged.

    The import below is intentional: it has the side-effect of bubbling
    any exceptions we failed to process in C land.
    """
    import sys  # noqa
    return x
def _moduleExist( moduleNames ):
"""Checks that the specified module can be loaded in the current python path.
moduleNames is a list containing every element of the module name. For example,
module 'Data.Character.Robot' would be [ 'Data', 'Character', 'Robot' ] """
import sys, os
for path in sys.path :
fileName = os.path.join( path, *moduleNames )
if os.path.exists( fileName + '.py' ) : return True
return False |
def filter_batch(batch, i):
    """Check whether sample *i* should be included: it must be Danish
    and have passed the quality filter."""
    is_danish = batch["lang"][i] in {"da"}
    return is_danish and batch["passed_quality_filter"][i] is True
def sum_digits(y):
    """Sum all the decimal digits of y.

    >>> sum_digits(10)  # 1 + 0 = 1
    1
    >>> sum_digits(4224)  # 4 + 2 + 2 + 4 = 12
    12
    >>> sum_digits(1234567890)
    45
    """
    total = 0
    while y > 0:
        y, digit = divmod(y, 10)
        total += digit
    return total
def to_spec(reg_id, kwargs):
    """Return a build spec dict from a registry id and its arguments."""
    spec = {'type': reg_id}
    spec['args'] = kwargs
    return spec
def find_groupby_names(url):
    """
    Return the path segments that follow each 'groupBy' occurrence in
    *url*, with surrounding slashes stripped.
    """
    segments = url.split("groupBy")[1:]
    return [segment.strip("/") for segment in segments]
def make_matrix_from_axes(x, y, z):
    """Construct a row-major 3x3 orientation tuple whose columns are
    the basis vectors x, y, z."""
    return tuple(axis[row] for row in range(3) for axis in (x, y, z))
def extract_values(obj, key):
    """Recursively pull values of the specified key from nested JSON.

    Values that are themselves dicts or lists are descended into rather
    than collected, so only leaf values stored under *key* appear in
    the result.
    """
    found = []

    def _walk(node):
        # Depth-first traversal accumulating matches into `found`.
        if isinstance(node, dict):
            for k, v in node.items():
                if isinstance(v, (dict, list)):
                    _walk(v)
                elif k == key:
                    found.append(v)
        elif isinstance(node, list):
            for item in node:
                _walk(item)

    _walk(obj)
    return found
def interval_prime_sieve(block_start_i, block_sz):
    """Sieve the interval [block_start_i, block_start_i + block_sz).

    Returns a list where index j is True iff (block_start_i + j) is
    prime.

    Fixes two defects in the original:
    * trial divisors only ran up to ``block_start_i``, so composites
      whose smallest factor is >= block_start_i (e.g. 49 in a block
      starting at 3) were reported as prime; divisors now run up to
      sqrt of the largest value in the block;
    * the values 0 and 1 were reported as prime when the block
      covered them.
    """
    import math
    block = [True] * block_sz
    # 0 and 1 are not prime.
    for value in range(block_start_i, min(block_start_i + block_sz, 2)):
        block[value - block_start_i] = False
    # Any composite in the block has a factor <= sqrt(max value).
    limit = math.isqrt(block_start_i + block_sz - 1) if block_sz else 1
    for i in range(2, limit + 1):
        # First multiple of i inside the block, but never i itself.
        first = max(i * i, ((block_start_i + i - 1) // i) * i)
        for value in range(first, block_start_i + block_sz, i):
            block[value - block_start_i] = False
    return block
def class_name_from_module_name(module_name):
    """Takes a module name and returns the name of the class it defines.

    Dashes in the module name are treated as underscores.

    Example::
        >>> class_name_from_module_name('with-dashes')
        'WithDashes'
        >>> class_name_from_module_name('with_underscores')
        'WithUnderscores'
        >>> class_name_from_module_name('oneword')
        'Oneword'
    """
    normalized = module_name.replace('-', '_')
    return ''.join(map(str.title, normalized.split('_')))
def sales_growth_rate(sales_period_1, sales_period_2):
    """Return the sales growth rate of the current period versus the
    previous period, as a percentage.

    Args:
        sales_period_1 (float): Total company sales for the previous period.
        sales_period_2 (float): Total company sales for the current period.
    Returns:
        Growth of period 2 sales over period 1, in percent.
    """
    change = sales_period_2 - sales_period_1
    return (change / sales_period_1) * 100
def string_remove(str1: str, str2: str) -> str:
    """
    Remove all instances of the second string from the first string.

    :param str1: The string from which to remove strings.
    :param str2: The string to remove.
    :returns: The string after removing desired strings.
    :raises TypeError: if either argument is not a string.
    """
    if not isinstance(str1, str):
        raise TypeError(f"Parameter str1 must be a string, received {type(str1)}.")
    if not isinstance(str2, str):
        # Fixed: the original message named the wrong parameter and
        # reported type(str1) instead of type(str2).
        raise TypeError(f"Parameter str2 must be a string, received {type(str2)}.")
    return str1.replace(str2, "")
def join_paths(path, files, extension):
    """Join *path* (a pathlib-style object supporting '/') with each
    name in *files*, appending *extension* to every name first unless
    it is None."""
    if extension is None:
        return [path / name for name in files]
    return [path / (name + extension) for name in files]
def get_show_table_columns(table):
    """
    Gets the query of SHOW COLUMNS for a given table.

    :type str
    :param table: A table name
    :rtype str
    :return A query
    """
    return f'SHOW COLUMNS FROM `{table:s}`'
def escape_filter_exp(filter_exp: str):
    """
    Escapes the special characters in an LDAP filter based on RFC 4515.

    Backslash is escaped first so the inserted escape sequences are not
    themselves re-escaped.

    :param str filter_exp: the unescaped filter expression.
    :return: the escaped filter expression.
    :rtype: str
    """
    chars_to_escape = (
        ("\\", "\\5C"),  # must come first
        ("*", "\\2A"),
        ("(", "\\28"),
        (")", "\\29"),
        ("\0", "\\00"),  # fixed: RFC 4515 spells NUL as \00, not \0
    )
    for char, repl in chars_to_escape:
        filter_exp = filter_exp.replace(char, repl)
    return filter_exp
def velocity_move(x_coordinate, y_coordinate, velocity_x, velocity_y, boids_number):
    """Advance the first *boids_number* boids one step along their
    velocities. Mutates the coordinate lists in place and returns them."""
    for idx in range(boids_number):
        x_coordinate[idx] += velocity_x[idx]
        y_coordinate[idx] += velocity_y[idx]
    return x_coordinate, y_coordinate
def get_reactions_producing(complexes, reactions):
    """dict: maps each complex to the reactions listing it among their
    products."""
    producers = {}
    for cplx in complexes:
        producers[cplx] = [rxn for rxn in reactions if cplx in rxn.products]
    return producers
def descope_queue_name(scoped_name):
    """Descope a queue name scoped with '.'.

    The scoped form is 'project-id.queue-name'; the piece between the
    first and second dot is returned (matching the original behaviour).
    """
    return scoped_name.split('.', 2)[1]
def splitAt(s, i, gap=0):
    """Split *s* into two strings at index *i*, optionally dropping
    *gap* characters after the split point."""
    head = s[:i]
    tail = s[i + gap:]
    return head, tail
def _sharded_checkpoint_pattern(process_index, process_count):
"""Returns the sharded checkpoint prefix."""
return f"shard-{process_index:05d}-of-{process_count:05d}_checkpoint_" |
def zone_is_boost_active(zone):
    """
    Is the boost active for the zone.

    Returns the value of the zone's 'isboostactive' entry; raises
    KeyError when the key is absent.
    """
    return zone["isboostactive"]
def transform_lowercase(val, mode=None):
    """
    Convert to lowercase.

    <dotted>|lowercase        string to lowercase (other values pass through)
    <dotted>|lowercase:force  string to lowercase or raises

    Fixed: values without a ``.lower()`` method raise AttributeError,
    which the original only caught as TypeError — so the documented
    pass-through behaviour never happened for e.g. ints. Both exception
    types are now handled.
    """
    try:
        return val.lower()
    except (AttributeError, TypeError):
        if mode == 'force':
            raise
        return val
def title_formatter(title: str) -> str:
    """
    Build a reusable heading for an argument section by prefixing the
    title with a newline.
    """
    return "\n{}".format(title)
def file_scanning(paths):
    """
    Recursively list all files beneath *paths*.

    Args:
        paths (str/list/tuple): a directory path or
            a (potentially nested) list/tuple of directory paths
    Returns:
        A list of all files under `paths`
    Raises:
        TypeError: for any other argument type.
    """
    import os
    if isinstance(paths, (list, tuple)):
        collected = []
        for sub in paths:
            collected += file_scanning(sub)
        return collected
    if isinstance(paths, str):
        found = []
        for root, _, filenames in os.walk(paths):
            for filename in filenames:
                found.append(os.path.join(root, filename))
        return found
    raise TypeError("Argument type not accepted: {}".format(paths))
def count_bits(number):
    """Count the 1 bits in the binary representation of *number*.

    Solution to the Codewars 'Bit Counting' kata:
    https://www.codewars.com/kata/526571aae218b8ee490006f4/train/python
    """
    ones = 0
    quotient = number
    while True:
        quotient, remainder = divmod(quotient, 2)
        if remainder == 1:
            ones += 1
        if quotient == 0:
            break
    return ones
def terrain(x):
    """Interpolate the noise value and return the terrain type.

    The noise range is partitioned from deep water (x < -1) through
    water, beach, grass and forest up to deep forest (x > 1); each
    band maps to a name and an RGB color.
    """
    if x <-1:
        return {"name": "deep water", "color": (0, 0, 100),}
    elif -1 <= x <= -0.5:
        return {"name": "water", "color": (0, 0, 180)}
    elif -0.5 < x <= -0.3:
        return {"name": "shallow water", "color": (0, 0, 230)}
    elif -0.3 < x <= 0.1:
        return {"name": "beach", "color": (244, 164, 96)}
    elif 0.1 < x <= 0.4:
        return {"name": "grass", "color": (127, 255, 0)}
    elif 0.4 < x <= 1:
        return {"name": "forest", "color": (0, 128, 0)}
    else:
        # x > 1 (the original comment said "x > -1", which was wrong;
        # NaN also falls through to this branch)
        return {"name": "deep forest", "color": (0, 50, 0)}
def pColorIa(color):
    """Likelihood of observing a host galaxy with the given rest-frame
    B-K color, assuming the SN is a Ia.

    RETURNS : (P(B-K|Ia), err_low, err_high)
    """
    if color < 3:
        return (0.240, 0.05, 0.05)
    if color < 4:
        return (0.578, 0.05, 0.05)
    return (0.183, 0.05, 0.05)
def getTweetPlaceFullname(tweet):
    """If included, read out the tweet's place full name; else None."""
    place = tweet.get('place')
    if place is not None and 'full_name' in place:
        return place['full_name']
    return None
def pinpoint_event():
    """Generates a sample Amazon Pinpoint campaign event payload.

    Returns a fixed dict (CUSTOM-channel endpoint, static IDs and
    timestamps) for use as a test fixture.
    """
    return {
        "Message": {},
        "ApplicationId": "71b0f21869ac444eb0185d43539b97ea",
        "CampaignId": "54115c33de414441b604a71f59a2ccc3",
        "TreatmentId": "0",
        "ActivityId": "ecf06111556d4c1ca09b1b197469a61a",
        "ScheduledTime": "2020-04-19T00:33:24.609Z",
        "Endpoints": {
            "EndpointId-1234": {
                "ChannelType": "CUSTOM",
                "Address": "+14255555555",
                "EndpointStatus": "ACTIVE",
                "OptOut": "NONE",
                "Location": {
                    "Country": "USA"
                },
                "Demographic": {
                    "Make": "Apple",
                    "Platform": "ios"
                },
                "EffectiveDate": "2020-04-03T22:23:23.597Z",
                "Attributes": {
                    "FirstName": [
                        "Test"
                    ]
                },
                "User": {
                    "UserId": "austin52789"
                },
                "CreationDate": "2020-04-03T22:23:23.597Z"
            }
        }
    }
def _cast(value):
"""
Cast input strings to their 'natural' types. Strip single quotes
from strings.
"""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
if value.strip() == 'T':
return True
if value.strip() == 'F':
return False
return value.strip("'") |
def style_str(s, color='black', weight=300):
    """
    Style the supplied string with HTML tags.

    Args:
        s (str): string to format
        color (str): color to show the string in
        weight (int): how thick the string will be displayed
    Returns:
        html (string): html representation of the string
    """
    style = f"color:{color}; font-weight:{weight}"
    return f'<text style="{style}" >{s}</text>'
def get_total_class_count(classes, idx2classes, old_total_class_count=None, removed_entry_dict=None):
    """Total per-class counts over all entries of ``idx2classes``.

    Two modes are supported:

    (1) De novo: sum each class (from ``classes``) over every entry in
        ``idx2classes``. Every entry must map each class to a count.
    (2) Incremental: when BOTH ``old_total_class_count`` and
        ``removed_entry_dict`` are given, return a freshly-allocated
        dict equal to the old totals minus the removed entry's counts —
        an almost instantaneous correction rather than a full recount.

    Parameters
    -------------
    classes : list
        List of valid classes; every entry in idx2classes should map
        each of these to a count (even if the count is 0).
    idx2classes : dict
        Maps index (starting at 0) to a {class_name: count} dict.
    old_total_class_count : dict
        Optional previous totals ({class_name: total}). Default = None.
    removed_entry_dict : dict
        Optional single entry whose counts are subtracted from
        old_total_class_count.

    Returns
    ------------
    dict
        Maps class identity to total count for that class.

    Raises
    ------------
    Exception
        If only one of old_total_class_count / removed_entry_dict is
        provided.
    """
    if removed_entry_dict is not None and old_total_class_count is not None:
        # Fresh dict so callers never share state with the old totals.
        return {c: old_total_class_count[c] - removed_entry_dict[c]
                for c in old_total_class_count}
    if removed_entry_dict is not None or old_total_class_count is not None:
        raise Exception('If removed_entry_dict is provided then old_total_class_count must also be provided (and vice versa)')
    # De novo recount from scratch.
    totals = {c: 0 for c in classes}
    for idx in idx2classes:
        entry = idx2classes[idx]
        for c in classes:
            totals[c] += entry[c]
    return totals
def filter_ldconfig_process(ps_rows):
    """
    Sometimes an ldconfig process running under the django user shows
    up; filter it out.

    :param ps_rows: A list of PsRow objects.
    """
    def _is_django_ldconfig(row):
        return row.ruser == 'django' and 'ldconfig' in row.args

    return [row for row in ps_rows if not _is_django_ldconfig(row)]
def myconverter(o):
    """Serialization hook for json.dumps(data, default=myconverter):
    stringifies datetimes, returns None for anything else."""
    import datetime
    if isinstance(o, datetime.datetime):
        return str(o)
def custom_format_old(source, language, class_name, options, md):
    """Custom fenced-block formatter (legacy): wrap *source* in a div.

    The output intentionally reproduces the historical markup,
    including the non-standard 'class_name=' attribute and stray comma.
    ``md`` is unused but kept for the formatter interface.
    """
    template = '<div lang="%s" class_name="class-%s", option="%s">%s</div>'
    return template % (language, class_name, options['opt'], source)
def make_keyword_html(keywords):
    """This function makes a section of HTML code for a list of keywords.

    Args:
        keywords: A list of strings where each string is a keyword.
    Returns:
        A string containing HTML code for displaying keywords, e.g.:
        '<strong>Ausgangswörter:</strong> Nature, Plants, Fauna'

    Fixed: the original indexed keywords[-1] and therefore raised
    IndexError for an empty list; str.join handles that case and
    produces identical output otherwise.
    """
    return '<strong>Ausgangswörter:</strong> ' + ', '.join(keywords)
def key_match(key1, key2):
    """Determine whether key1 matches the pattern of key2 (similar to a
    RESTful path); key2 may contain a single '*'.

    For example, "/foo/bar" matches "/foo/*".
    """
    star = key2.find("*")
    if star == -1:
        return key1 == key2
    prefix = key2[:star]
    if len(key1) > star:
        return key1[:star] == prefix
    return key1 == prefix
def get_block_devices(bdms=None):
    """Render EC2 block-device mappings as a human-readable report.

    @type bdms: list

    Returns an empty string for a missing/empty list; EBS details are
    only printed when the mapping has a non-empty 'Ebs' entry.
    """
    if not bdms:
        return ""
    lines = []
    for bdm in bdms:
        lines.append("{0}\n".format(bdm.get('DeviceName', '-')))
        ebs = bdm.get('Ebs')
        if not ebs:
            continue
        lines.append(" Status: {0}\n".format(ebs.get('Status', '-')))
        lines.append(" Snapshot Id: {0}\n".format(ebs.get('SnapshotId', '-')))
        lines.append(" Volume Size: {0}\n".format(ebs.get('VolumeSize', '-')))
        lines.append(" Volume Type: {0}\n".format(ebs.get('VolumeType', '-')))
        lines.append(" Encrypted: {0}\n".format(str(ebs.get('Encrypted', '-'))))
        lines.append(" Delete on Termination: {0}\n".format(ebs.get('DeleteOnTermination', '-')))
        lines.append(" Attach Time: {0}\n".format(str(ebs.get('AttachTime', '-'))))
    return "".join(lines).rstrip()
def bytes_r(b):
    """Return a new bytes object with *b*'s bytes in reverse order.

    Accepts any bytes-like object; always returns ``bytes`` (so a
    bytearray input is not returned as a bytearray).
    """
    return bytes(b[i] for i in range(len(b) - 1, -1, -1))
def target_index(source: int, target: int) -> int:
    """Returns the index of the target channel, skipping the index of the self channel.
    Used in inter CKS/CKR communication"""
    assert source != target
    # Channels after the source slot shift down by one position.
    return target if target < source else target - 1
def normalize_data(indata, prop):
    """
    Transforms Wikidata results into the neccessary format
    to be added to SoNAR
    ---------
    indata : str
        result of query, e.g. "1954-07-17T00:00:00Z" for date properties
    prop : str
        type of property (birthdate, deathdate etc.)
    Returns
    ---------
    list
        [yyyy, mm, dd] strings for date properties, None otherwise.
    """
    # NOTE: the original tested `prop == "birthdate" or "deathdate"`, which
    # is always truthy, and used strip("T00:00:00Z"), which strips *any*
    # leading/trailing characters from that set — corrupting dates that end
    # in '0' (e.g. "...-10" became "...-1").
    if prop in ("birthdate", "deathdate"):
        # Drop the time suffix at the first "T", then split yyyy-mm-dd.
        return indata.split("T")[0].split("-")
    return None
def in_nested_list(my_list, item):
    """
    Determines if an item is in my_list, even if nested in a lower-level list.
    """
    if item in my_list:
        return True
    # Recurse only into list entries; any() short-circuits on first hit.
    return any(
        in_nested_list(entry, item)
        for entry in my_list
        if isinstance(entry, list)
    )
def scrub(txt):
    """Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
    # Map space and hyphen to underscore in one translate pass, then lowercase.
    return txt.translate(str.maketrans(" -", "__")).lower()
def get_character_limit(pdf_field_tuple, char_width=6, row_height=12):
    """
    Estimate the number of characters that fit in a PDF form field,
    based on its x/y bounding box (pdf_field_tuple[3]), laid out as:
    0: horizontal start
    1: vertical start
    2: horizontal end
    3: vertical end

    Heuristics measured on typical fields: ~6 px per character,
    ~12 px per row.

    Returns None when the tuple carries no usable bounding box.
    """
    # Make sure it's the right kind of tuple. The original checked
    # len < 3, which let a 3-element tuple through and then raised
    # IndexError on pdf_field_tuple[3]; a falsy box also crashed later.
    if len(pdf_field_tuple) < 4:
        return None  # we can't really guess
    box = pdf_field_tuple[3]
    if not box or len(box) < 4:
        return None  # no usable bounding box
    # Did a little testing for typical field width/number of chars:
    # 176 px = 25-34 chars, 121 px = 17-22 chars.
    length = box[2] - box[0]
    height = box[3] - box[1]
    # Compare against row_height (original hard-coded 12) so a custom
    # row height still yields at least one row for short fields.
    num_rows = int(height / row_height) if height > row_height else 1
    num_cols = int(length / char_width)
    return num_rows * num_cols
def get_attributes(obj):
    """Get and format the attributes of an object.

    Parameters
    ----------
    obj
        An object that has attributes.

    Returns
    -------
    dict
        The object's instance attributes, with any leading underscores
        stripped from the names.
    """
    return {name.lstrip("_"): value for name, value in vars(obj).items()}
def __get_bit_string(value):
    """INTERNAL.
    Get the binary string representation of an int, zero-padded to
    at least 8 characters (zfill keeps a leading '-' in place).
    """
    return format(value, "b").zfill(8)
def is_between(lo: float, x: float, hi: float) -> bool:
    """Checks if `x` is strictly between `lo` and `hi` (in either order)."""
    if lo < hi:
        return lo < x < hi
    return hi < x < lo
def transform_eq(transformer, rhs):
    """
    Build a matcher whose equality check first transforms the other operand.

    Comparing the returned object with ``other`` applies ``transformer`` to
    ``other`` and tests the result against ``rhs``. Every transformed value
    is logged, and ``repr`` shows the full comparison history plus ``rhs``.

    :param transformer: a function that takes the compared objects and returns
        a transformed version
    :param rhs: the actual data that should be compared with the result of
        transforming the compared object
    """
    class TransformedEq(object):
        def __init__(self):
            self.comparisons = []

        def __eq__(self, other):
            result = transformer(other)
            self.comparisons.append(result)
            return result == rhs

        def __ne__(self, other):
            return not (self == other)

        def __repr__(self):
            return "<TransformedEq comparisons=%r, operand=%r>" % (
                self.comparisons, rhs)

    return TransformedEq()
def word_distance(word1, word2):
    """Computes the number of positions at which two equal-length words differ.

    word1, word2: strings of the same length
    Returns: integer (Hamming-style count of mismatched positions)
    """
    assert len(word1) == len(word2)
    return sum(1 for a, b in zip(word1, word2) if a != b)
def gather(items, attribute):
    """Gather children DAOs into a list under a shared parent.

        A+1          A+[1,3,5]
        B+C          B+[C, x]
        A+3   -->
        A+5
        B+x

    A shared parent (A or B in the example) is identified by
    primary key; the first item seen for a pk becomes the parent.

    1, 3, 5, C and x are joined DAOs named by 'attribute'. These
    are gathered into a list.

    Arguments:
        items - list of DAOs
        attribute - name of joined child

    Returns None for an empty/falsy input, otherwise the unique parents
    with their `attribute` slot replaced by the gathered children.
    """
    if not items:
        return None
    group = {}  # primary key -> representative parent DAO
    for item in items:
        # Truthy children get appended to the parent's gathered list below.
        child = getattr(item, attribute)
        # NOTE(review): assumes every DAO exposes its primary-key field name
        # via item._fields.pk — confirm against the DAO base class.
        pk = getattr(item, item._fields.pk)
        if pk not in group:
            # First occurrence: this item becomes the parent for this pk.
            group[pk] = item
            # Install an empty list in the joined-tables slot so children
            # can be appended via getattr(parent, attribute) below.
            # NOTE(review): if '_tables' is missing from item.__dict__, the
            # fresh {} is never stored back on the item, so the [] is
            # silently discarded — verify that callers always have '_tables'.
            joined = item.__dict__.get('_tables', {})
            joined[attribute] = []
        parent = group[pk]
        if child:
            getattr(parent, attribute).append(child)
    return [v for v in group.values()]
def is_any_in_txt(txt_list, within_txt):
    """
    Return True when at least one item of ``txt_list`` is a substring of
    ``within_txt``.

    Example: (['a', 'b'], 'ab') --> True, since 'a' is contained in 'ab'
    """
    return any(candidate in within_txt for candidate in txt_list)
def lzip(*args):
    """
    zip(...) but returns list of lists instead of list of tuples
    """
    return list(map(list, zip(*args)))
def decode_bigint_bitvec(bitvec):
    """Decode a big-endian (MSB-first) integer from a vector of bits."""
    digits = map(str, bitvec)
    return int("".join(digits), 2)
def join_condition(suburb_bound, forest_bound):
    """
    True when any suburb bounding box intersects the forest bounding box.
    """
    f_min_x, f_min_y, f_max_x, f_max_y = forest_bound

    def overlaps(box):
        # Axis-aligned box overlap: each box must reach past the other's
        # near edge on both axes.
        s_min_x, s_min_y, s_max_x, s_max_y = box
        return (s_min_y <= f_max_y and s_max_x >= f_min_x and
                s_max_y >= f_min_y and s_min_x <= f_max_x)

    return any(overlaps(sub) for sub in suburb_bound)
def inp(X):
    """
    Allows to parameterize RMinimum with an INT or a list.
    Automatically generates a list from the first X numbers, i.e. [0, ..., X-1].

    :param X: Either an INT or a list
    :return: list = [0,...,X-1] when X is INT, otherwise X unchanged
    """
    # range() raises TypeError (not ValueError) for non-integers, so the
    # original `except ValueError` never fired and a list argument crashed.
    try:
        return list(range(X))
    except TypeError:
        return X
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.