def check_classes_to_swap(source, target):
    """Check which item classes are better to use on a swap."""
    classes = {}
    for class_, percentage in source['percentages'].items():
        classes[class_] = percentage - target['percentages'][class_]
    diff = sorted(classes.items(), key=lambda elm: elm[1])
    return int(diff[0][0]), int(diff[-1][0])
|
def _nt_sum(cobj, prop, theta):
"""
Create sum expressions in n-t forms (sum(n[i]*theta**t[i]))
Args:
cobj: Component object that will contain the parameters
prop: name of property parameters are associated with
theta: expression or variable to use for theta in expression
Returns:
Pyomo expression of sum term
"""
# Build sum term
i = 1
s = 0
while True:
try:
ni = getattr(cobj, f"{prop}_coeff_n{i}")
ti = getattr(cobj, f"{prop}_coeff_t{i}")
s += ni * theta**ti
i += 1
except AttributeError:
break
return s
|
def fix_links(links):
    """
    The fix_links function removes multiple references to the same article,
    keeping only one representative.
    """
    if links:
        result = [links[0]]
        for i in range(1, len(links)):
            test = [True for elem in result if elem in links[i]]
            if not test:
                result.append(links[i])
        return result
    else:
        return links
|
def digit(number: int, n: int) -> int:
"""Indexes integer without type conversion (e.g digit(253, 1) returns 5)\n
Index of number is n
"""
return number // 10 ** n % 10
|
def ifnone(value, ifnone='~'):
""" Pass a string other than "None" back if the value is None.
Used in datestamp handling.
"""
if value is None:
return ifnone
return value
|
def split_extend(seq, sep, length):
    """
    Splits on a character, but also outputs a list of a defined length.
    Uses the first field of the split result to pad out the requested length.
    eg
    >>> split_extend("A;B;C", ';', 3)
    ['A', 'B', 'C']
    >>> split_extend("A", ';', 3)
    ['A', 'A', 'A']
    >>> split_extend("A;B", ';', 4)
    ['A', 'B', 'A', 'A']
    :param seq: the string to split
    :type seq: str
    :param sep: the separator to split on
    :type sep: str
    :param length: the minimum length of the returned list
    :type length: int
    :return: the split fields, padded with the first field
    :rtype: list
    """
    s = seq.split(sep)
    s.extend([s[0]] * (length - len(s)))
    return s
|
def group(group_name):
"""Marks this field as belonging to a group"""
tag = "group:%s" % group_name
return tag
|
def _convert_valid_actions(valid_actions):
"""Convert provided valid_actions for gRPC.
Args:
valid_actions: Either None, a list of bools or a numpy boolean array.
Returns:
None if valid_actions is None. Otherwise, a list of bools.
"""
if valid_actions is None:
return None
return list(map(bool, valid_actions))
|
def links_to_graph(links):
"""
Changes links to undirected graph
"""
graph = {} # change in dictionary graph
for u, v in links: # make undirected graph
if u not in graph:
graph[u] = []
if v not in graph:
graph[v] = []
graph[u].append((1, v))
graph[v].append((1, u))
return graph
|
def lookup_discrete(x, xs, ys):
"""
Intermediate values take on the value associated with the next lower x-coordinate (also called a step-wise function). The last two points of a discrete graphical function must have the same y value.
Out-of-range values are the same as the closest endpoint (i.e, no extrapolation is performed).
"""
for index in range(0, len(xs)):
if x < xs[index]:
return ys[index - 1] if index > 0 else ys[index]
return ys[len(ys) - 1]
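# Usage sketch (illustrative addition, not part of the original row): the xs/ys
# values below are made-up sample points showing the step-wise lookup behaviour.
assert lookup_discrete(1.5, [0, 1, 2, 3], [10, 20, 30, 30]) == 20  # holds value of lower x
assert lookup_discrete(-5, [0, 1, 2, 3], [10, 20, 30, 30]) == 10   # clamped to first point
assert lookup_discrete(99, [0, 1, 2, 3], [10, 20, 30, 30]) == 30   # clamped to last point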
|
def select_all_table(table: str) -> str:
    """
    select_all_table is used for returning all the table data.
    :param table: name of the table (e.g. the stock table) to select from
    :return: the SELECT query string
    """
    sql_query = f"SELECT * FROM {table}"
    return sql_query
|
def pig_actions_d(state):
"""The legal actions from a state. Usually, ["roll", "hold"].
Exceptions: If double is "double", can only "accept" or "decline".
Can't "hold" if pending is 0.
If double is 1, can "double" (in addition to other moves).
(If double > 1, cannot "double").
"""
# state is like before, but with one more component, double,
# which is 1 or 2 to denote the value of the game, or 'double'
# for the moment at which one player has doubled and is waiting
# for the other to accept or decline
# --------
# p in (0,1); me in (0, goal), you in (0, goal)
# pending in (0, goal), double in [1, 2, 'double', 'accept']
# return in ['roll', 'hold', 'accept', 'decline', 'double']
(p, me, you, pending, double) = state
# # you have to accept or decline when your opponent just doubled.
# if double == 'double':
# return set(['decline', 'accept'])
# # you can continue roll and hold when your opponent accept your double
# if double == 'accept':
# return set(['roll', 'hold'])
# # you can continue roll and hold and raise to 'double' mode
# if double == 1 and pending == 0:
# return set(['roll', 'double'])
# elif double == 1 and pending != 0:
# return set(['roll', 'hold', 'double'])
# # you can continue roll and hold, but you cannot raise any more
# if double == 2 and pending == 0:
# return set(['roll'])
# elif double == 2 and pending != 0:
# return set(['roll', 'hold'])
actions = (['accept', 'decline'] if double == 'double' else
['roll', 'hold'] if pending else
['roll'])
if double == 1: actions.append("double")
return actions
|
def dar_troco(valor_a_pagar, valor_em_dinheiro):
    """Compute the change as a list of (note, count) pairs using notes of
    1, 2, 5, 10, 20 and 50, ignoring cents.
    e.g.: paying 1 with 10 returns [(5, 1), (2, 2)]"""
    notas = (50, 20, 10, 5, 2, 1)
    valor_troco = valor_em_dinheiro - valor_a_pagar
troco = []
for nota in notas:
quantidade = valor_troco // nota
valor_troco = valor_troco % nota
if quantidade != 0:
troco.append((nota, quantidade))
return troco
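# Usage sketch (illustrative addition): the amounts are arbitrary; the function
# greedily uses the largest notes first and returns (note, count) pairs.
assert dar_troco(1, 10) == [(5, 1), (2, 2)]
assert dar_troco(37, 100) == [(50, 1), (10, 1), (2, 1), (1, 1)]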
|
def is_structured(dt):
"""Check if the dtype is structured."""
if not hasattr(dt, "fields"):
return False
return dt.fields is not None
|
def _split_categories(value):
"""Splits the categories on comma. Returns a list of categories or if
there is one category named 'none' returns an empty list."""
categories = [c.strip() for c in value.split(',')]
if len(categories) == 1 and categories[0].lower() == 'none':
return []
return categories
|
def address_repr(buf, reverse: bool = True, delimit: str = "") -> str:
"""Convert a buffer into a hexlified string."""
order = range(len(buf) - 1, -1, -1) if reverse else range(len(buf))
return delimit.join(["%02X" % buf[byte] for byte in order])
|
def format_literal(raw):
"""Format a literal into a safe format. This is used to format the
flont:Literal IRI.
"""
return "_" + raw.replace(" ", "_")
|
def isValidOs(sOs):
    """
    Validates the OS name.
    """
    return sOs in ('darwin', 'dos', 'dragonfly', 'freebsd', 'haiku', 'l4', 'linux', 'netbsd', 'nt', 'openbsd',
                   'os2', 'solaris', 'win', 'os-agnostic')
|
def return_hashtags(hashtag_list: list) -> str:
    """Returns a string of formatted hashtags from a list."""
    hashtag_string = "\n".join(f"#{tag} " for tag in hashtag_list)
    print(hashtag_string)
    return hashtag_string
|
def __get_edge_dict(uid, source, target, color, edge_type):
"""
Create dictionary for edges
:param uid:
:param source:
:param target:
:param color:
:param edge_type:
:return:
"""
return {
'id': uid,
'source': source,
'target': target,
'color': color,
'edge_type': edge_type
}
|
def split_host_and_port(host):
    """
    Splits a string into its host and port components
    :param host: a string matching the following pattern: <host name | ip address>[:port]
    :return: a Dictionary containing 'host' and 'port' entries for the input value
    """
    if host is None:
        return None
    host_and_port = {}
    parts = host.split(":")
    host_and_port['host'] = parts[0]
    if len(parts) > 1:
        host_and_port['port'] = int(parts[1])
    return host_and_port
|
def get_chart_url(country_code, interval, date1, date2):
"""Gets a url with the specified parameters"""
chart_url = f'https://spotifycharts.com/regional/{country_code}/{interval}/{date1}--{date2}'
return chart_url
|
def make_adjacency_matrix(g):
    """ Make an adjacency matrix from a dict of node: [neighbors] pairs
    """
    keys = sorted(g.keys())
    size = len(keys)
    # The matrix must be initialized before it can be indexed.
    M = [[0] * size for i in range(size)]
    # g.items() iterates over the key: value entries of the dictionary, and
    # "for b in row" iterates over each neighbor, giving all (a, b) pairs.
    # keys.index(...) maps each node to its row/column index in the sorted
    # key list, which is why the keys were extracted and sorted above.
    # Each pair is assigned 2 on the diagonal (self-loop) and 1 otherwise.
    for a, b in [(keys.index(a), keys.index(b)) for a, row in g.items() for b in row]:
        M[a][b] = 2 if (a == b) else 1
    return M
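# Usage sketch (illustrative addition): a tiny made-up graph with a self-loop
# shows the 1 (edge) vs 2 (diagonal/self-loop) convention.
assert make_adjacency_matrix({'a': ['b'], 'b': ['a', 'b']}) == [[0, 1], [1, 2]]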
|
def get_provenance_record(attributes, obsname, ancestor_files):
"""Create a provenance record describing the diagnostic data and plot."""
if obsname != '':
caption = (
"{long_name} bias for average between {start_year} and {end_year}".
format(**attributes) + " against " + obsname + " observations.")
else:
caption = (
"Average {long_name} between {start_year} and {end_year} ".format(
**attributes))
record = {
'caption': caption,
'statistics': ['mean'],
'domains': ['global'],
'plot_type': 'map',
'authors': [
'lovato_tomas',
],
'references': [
'acknow_project',
],
'ancestors': ancestor_files,
}
return record
|
def VersionString(versionTuple):
""" (x,y,z .. n) -> 'x.y.z...n' """
return '.'.join(str(x) for x in versionTuple)
|
def solution_b(puzzle, stop=2020):
"""
This next solution is even faster, about 25%, thanks to Gravitar64.
https://github.com/Gravitar64/Advent-of-Code-2020
Counting the turns from len(puzzle) instead of len(puzzle) + 1 makes
everything so easy and nice!
"""
spoken = {last: turn for turn, last in enumerate(puzzle, 1)}
last = puzzle[-1]
for turn in range(len(puzzle), stop):
recent = spoken.get(last, turn)
spoken[last] = turn
last = turn - recent
return last
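# Usage sketch (illustrative addition): assuming this is the "memory game" from
# Advent of Code 2020 day 15 (as the docstring's reference suggests), the
# published example [0, 3, 6] yields 436 as the 2020th number spoken.
assert solution_b([0, 3, 6]) == 436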
|
def unique_char(string):
    """Get index of first unique char."""
    frequency = {}
    unique_chars = []
    for index, char in enumerate(string):
        if char in frequency:
            frequency[char] += 1
            unique_chars = [item for item in unique_chars if item[1] != char]
        else:
            frequency[char] = 1
            unique_chars.append((index, char))
    return unique_chars[0][0]
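# Usage sketch (illustrative addition): 'a' and 'b' repeat, so the first unique
# character is 'c' at index 4.
assert unique_char("aabbcd") == 4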
|
def icon_dpi(px, icon):
"""
Inkscape default: 90dpi == 1-to-1 for 100px.
"""
x1,y1,x2,y2 = [float(a) for a in icon['view_box'].split(" ")]
width = x2-x1
height = y2-y1
# Fallback for invalid viewbox
if width < 0:
width = 100
if height < 0:
height = 100
return 90. / max(width, height) * px
|
def find_profile_lines(data, profile_name):
"""
Takes data as a list of lines and finds the range (startline, endline)
where we found the profile, where startline is the line right after the
profile name
"""
start, end = None, None
for i, line in enumerate(data):
if start is not None:
if line[0] == '[':
end = i
break
elif line.startswith('[{}]'.format(profile_name)):
start = i + 1
if end is None and start is not None:
end = len(data)
return start, end
|
def choice(*args):
"""
Creates a choice argument type. The user will be able to choose one of given possibilities.
Example:
choice("quickly", "slowly")
:param args: a list of string the user will be able to choose
:return: a dict representing this argument type with the appropriate format to be used by the JS front-end script
"""
assert len(args) > 0, "You must specify at least one choice"
return {
"type": "choice",
"choices": [c for c in args]
}
|
def append_cluster(cluster_a, cluster_b):
""" Appends cluster_b to cluster_a
:param cluster_a: array of shape [n_points, n_dimensions]
:param cluster_b: array of shape [n_points, n_dimensions]
"""
for point in cluster_b:
cluster_a.append(point)
return cluster_a
|
def checkTypeRecursively(inObject):
    """
    This method checks the type of the inner object in the inObject.
    If inObject is an iterable, this method returns the type of the first element.
    @ In, inObject, object, a python object
    @ Out, returnType, str, the type of the inner object
    """
returnType = type(inObject).__name__
try:
for val in inObject:
returnType = checkTypeRecursively(val)
break
except:
pass
return returnType
|
def _is_multiline_string(value: str):
"""Determine if a string is multiline.
.. note::
Inspired by http://stackoverflow.com/a/15423007/115478.
:param value: The value to check
:returns: A boolean indicating if the string is multiline
"""
for character in "\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
if character in value:
return True
return False
|
def inputs(form_args):
"""
Creates list of input elements
"""
element = []
for name, value in form_args.items():
element.append(
'<input type="hidden" name="{}" value="{}"/>'.format(name, value))
return "\n".join(element)
|
def arelle_parse_value(d):
"""Decodes an arelle string as a python type (float, int or str)"""
if not isinstance(d, str): # already decoded.
return d
try:
return int(d.replace(',', ''))
except ValueError:
pass
try:
return float(d.replace(",", ""))
except ValueError:
pass
return d
|
def map_codes_to_values(codes, values):
""" Map the huffman code to the right value """
out = {}
for i in range(len(codes)):
out[codes[i]] = values[i]
return out
|
def rfcspace(string):
    """
    If the string is an RFC designation, and doesn't have
    a space between 'RFC' and the rfc-number, a space is
    added
    """
    string = str(string)
    if string[:3].lower() == "rfc" and len(string) > 3 and string[3] != " ":
        return string[:3].upper() + " " + string[3:]
    else:
        return string
|
def get_severity(severity):
"""
Returns Severity as per DefectDojo standards.
:param severity:
:return:
"""
if severity == "high":
return "High"
elif severity == "medium":
return "Medium"
elif severity == "low":
return "Low"
elif severity == "informational":
return "Informational"
else:
return "Critical"
|
def bytes_to_31_bit_int(as_bytes):
"""
    Convert the 31 least-significant bits to an integer,
truncating any more significant bits.
"""
as_bytes = bytearray(as_bytes)
if len(as_bytes) < 4:
pad_len = 4 - len(as_bytes)
as_bytes = bytearray(pad_len * [0]) + as_bytes
as_int = (((as_bytes[-4] & 0x7f) << 3*8) +
(as_bytes[-3] << 2*8) +
(as_bytes[-2] << 1*8) +
(as_bytes[-1] << 0*8))
return as_int
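# Usage sketch (illustrative addition): the top bit of the leading 0xFF is
# truncated, and short inputs are zero-padded on the left.
assert bytes_to_31_bit_int(b"\xff\xff\xff\xff") == 0x7FFFFFFF
assert bytes_to_31_bit_int(b"\x01\x00") == 256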
|
def cut_include_start_end(some_text, start_text, end_text, maximum_lines_per_section=10000):
    """ from some_text (output from a Network device session), returns a List of List(strings): sections of some_text
    containing the lines between start_text and end_text, INCLUDING start_text and end_text in the returned sections.
    Used when the output from the Network Device needs to be trimmed before it is processed,
    to extract sections (e.g. Interfaces)
    :param some_text: usually the full command output
    :param start_text: the text that defines the beginning of a section
    :param end_text: the text that defines the ending of a section
    :param maximum_lines_per_section: if the end_text is not found, how many lines at most to take in a section
    """
include = False
matching_list_text = []
list_content = []
counter = 0
for line in some_text:
if not include:
if line.find(start_text) >= 0:
include = True
list_content.append(line)
counter += 1
else:
if line.find(start_text) >= 0:
matching_list_text.append(list_content)
list_content = []
counter = 0
list_content.append(line)
elif line.find(end_text) >= 0 or counter >= maximum_lines_per_section:
include = False
list_content.append(line)
matching_list_text.append(list_content)
list_content = []
counter = 0
else:
list_content.append(line)
counter += 1
if len(list_content) > 0:
matching_list_text.append(list_content)
return matching_list_text
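# Usage sketch (illustrative addition): the "show run" style lines below are made
# up; each section runs from an "interface" line through the closing "!".
_output = [
    "interface Gi0/1", " description uplink", "!",
    "interface Gi0/2", " shutdown", "!",
]
assert cut_include_start_end(_output, "interface", "!") == [
    ["interface Gi0/1", " description uplink", "!"],
    ["interface Gi0/2", " shutdown", "!"],
]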
|
def _get_leaf_list(tree):
"""
get all leaves of tree.
"""
if isinstance(tree, int):
return list()
leaves = list()
for sub_tree in tree:
if isinstance(sub_tree, int):
leaves.append(sub_tree)
else:
leaves.extend(
_get_leaf_list(sub_tree)
)
return leaves
|
def prepare_update_request(rows_to_update, rows_to_append, users_to_verify_data):
"""Create the request's body used for batch update."""
value_ranges = []
for row_number, dumped_data in rows_to_update:
        # only a single column within a specific row range is going to be updated
value_ranges.append(
{
"majorDimension": "ROWS",
"range": f"C{row_number}",
"values": [[dumped_data]],
}
)
if rows_to_append:
next_empty_row_number = len(users_to_verify_data) + 2
value_ranges.append(
{
"majorDimension": "ROWS",
"range": f"A{next_empty_row_number}:AA1000",
"values": rows_to_append,
}
)
return value_ranges
|
def format_judge(submission):
"""
judge if the submission file's format is legal
:param submission: submission file
:return: False for illegal
True for legal
"""
# submission: [sentenceID,antecedent_startid,antecedent_endid,consequent_startid,consequent_endid]
if submission[1] == '-1' or submission[2] == '-1':
return False
if (submission[3] == '-1' and submission[4] != '-1') or (submission[3] != '-1' and submission[4] == '-1'):
return False
if (int(submission[1]) >= int(submission[2])) or (int(submission[3]) > int(submission[4])):
return False
if not (int(submission[1]) >= -1 and int(submission[2]) >= -1 and int(submission[3]) >= -1 and int(
submission[4]) >= -1):
return False
return True
|
def human_bytes(num, suffix='B'):
""" Convert a number to bytes to a human-readable form"""
# taken from http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
|
def is_directory_automatically_created(folder: str):
"""
Verifies the name of the directory -> if it contains a month it returns True, otherwise False.
"""
months = [
"(01)Janvier",
"(02)Fevrier",
"(03)Mars",
"(04)Avril",
"(05)Mai",
"(06)Juin",
"(07)Juillet",
"(08)Aout",
"(09)Septembre",
"(10)Octobre",
"(11)Novembre",
"(12)Decembre",
]
return any(month in folder for month in months)
|
def debug(txt):
""" Print text to console."""
print(txt)
return ''
|
def divide_ceiling(numerator, denominator):
""" Determine the smallest number k such, that denominator * k >= numerator """
split_val = numerator // denominator
rest = numerator % denominator
if rest > 0:
return split_val + 1
else:
return split_val
|
def getMedian(alist):
"""get median of list"""
tmp = list(alist)
tmp.sort()
alen = len(tmp)
if (alen % 2) == 1:
return tmp[alen // 2]
else:
return (tmp[alen // 2] + tmp[(alen // 2) - 1]) / 2
|
def dominates(a, b):
"""Return true if a Pareto dominates b (maximization)"""
equals = True
for i in range(len(a)):
equals = equals and a[i] == b[i]
if a[i] < b[i]:
return False
if equals:
return False
return True
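# Usage sketch (illustrative addition): maximization, so [2, 3] dominates [2, 1],
# equal vectors never dominate, and any worse coordinate breaks dominance.
assert dominates([2, 3], [2, 1]) is True
assert dominates([2, 3], [2, 3]) is False
assert dominates([1, 3], [2, 2]) is False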
|
def normalize_disp(dataset_name):
"""Function that specifies if disparity should be normalized"""
return dataset_name in ["forward_facing"]
|
def vmul(vec1, vec2):
"""Return element wise multiplication"""
return [v1*v2 for v1, v2 in zip(vec1, vec2)]
|
def list_to_string(ilist):
    """ Takes a list of instructions and combines them into a single string.
    This is a helper for compact_instructions()."""
    result = ""
    for s in ilist:
        if len(result) > 0:
            result += "\n"
        result += s
    return result
|
def cm_q(cm_i, l_t, mac):
""" This calculates the damping in pitch coefficient
Assumptions:
None
Source:
J.H. Blakelock, "Automatic Control of Aircraft and Missiles"
Wiley & Sons, Inc. New York, 1991, (pg 23)
Inputs:
cm_i [dimensionless]
l_t [meters]
mac [meters]
Outputs:
cm_q [dimensionless]
Properties Used:
N/A
"""
cm_q = 2. * 1.1 * cm_i * l_t / mac
return cm_q
|
def filter_metrics_list(metrics_list, filters):
"""
Filter metrics list based on filters:
* filters: space separated list of filtered strings,
exclamation mark before word means negative filter
"""
if isinstance(filters, str):
filters = filters.split()
for _filter in filters:
if _filter[0] == '!':
# process negative filter
_filter = _filter[1:]
_filtered_metrics = [metric for metric in metrics_list if _filter not in metric]
else:
# process positive filter
_filtered_metrics = [metric for metric in metrics_list if _filter in metric]
metrics_list = _filtered_metrics
# filter "archive" metrics
_filtered_metrics = [metric for metric in metrics_list if ".archive." not in metric]
metrics_list = _filtered_metrics
return metrics_list
|
def sum(sequence, start=0):
"""sum(sequence[, start]) -> value
Returns the sum of a sequence of numbers (NOT strings) plus the value
of parameter 'start' (which defaults to 0). When the sequence is
empty, returns start."""
if isinstance(start, str):
raise TypeError("sum() can't sum strings [use ''.join(seq) instead]")
if isinstance(start, bytes):
raise TypeError("sum() can't sum bytes [use b''.join(seq) instead]")
if isinstance(start, bytearray):
raise TypeError("sum() can't sum bytearray [use b''.join(seq) instead]")
last = start
for x in sequence:
# Very intentionally *not* +=, that would have different semantics if
# start was a mutable type, such as a list
last = last + x
return last
|
def str_to_bool(value):
""" convert the boolean inputs to actual bool objects"""
if value.lower() in {'false', 'f', '0', 'no', 'n'}:
return False
elif value.lower() in {'true', 't', '1', 'yes', 'y'}:
return True
raise ValueError(f'{value} is not a valid boolean value')
|
def _is_passing_grade(course_grade):
"""
Check if the grade is a passing grade
"""
if course_grade:
return course_grade.passed
return False
|
def polynomial_power_combinations(degree):
"""
Combinations of powers for a 2D polynomial of a given degree.
Produces the (i, j) pairs to evaluate the polynomial with ``x**i*y**j``.
Parameters
----------
degree : int
        The degree of the 2D polynomial. Must be >= 0.
Returns
-------
combinations : tuple
A tuple with ``(i, j)`` pairs.
Examples
--------
>>> print(polynomial_power_combinations(1))
((0, 0), (1, 0), (0, 1))
>>> print(polynomial_power_combinations(2))
((0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (0, 2))
>>> # This is a long polynomial so split it in two lines
>>> print(" ".join([str(c) for c in polynomial_power_combinations(3)]))
(0, 0) (1, 0) (0, 1) (2, 0) (1, 1) (0, 2) (3, 0) (2, 1) (1, 2) (0, 3)
>>> # A degree zero polynomial would be just the mean
>>> print(polynomial_power_combinations(0))
((0, 0),)
"""
if degree < 0:
raise ValueError("Invalid polynomial degree '{}'. Must be >= 0.".format(degree))
combinations = ((i, j) for j in range(degree + 1) for i in range(degree + 1 - j))
return tuple(sorted(combinations, key=sum))
|
def rename_categories(old_to_new_map, data):
"""
Rename categories in task files
Args:
`old_to_new_map` should look like
{
"old category name 1": "new category name 1",
"old category name 2": "new category name 2"
}
"""
for i, category in enumerate(data["Categories"]):
if category in old_to_new_map:
data["Categories"][i] = old_to_new_map[category]
return data
|
def is_keyword(v):
""" Check if a value is of the format required to be a call to a header keyword
Parameters
----------
v : str
Value to be tested
Returns
-------
valid : bool
True if 'v' has the correct format to be a header keyword, False otherwise.
"""
valid = True
if ("," in v) or ("." not in v):
# Either an array or doesn't have the header keyword format (i.e. a fullstop)
return False
# Test if the first element is an integer
vspl = v.split(".")
try:
int(vspl[0])
except ValueError:
valid = False
# Test if there are two parts to the expression
if len(vspl) != 2:
return False
# Test if the second element is a string
try:
if valid is True:
int(vspl[1])
# Input value must be a floating point number
valid = False
except ValueError:
# Input value must be a string
valid = True
return valid
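# Usage sketch (illustrative addition): the values are made-up examples of the
# "<integer>.<string>" header-keyword format the docstring describes.
assert is_keyword("01.NAXIS") is True    # integer part + string part
assert is_keyword("1.25") is False       # second part is numeric, so not a keyword
assert is_keyword("NAXIS") is False      # no full stop at all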
|
def _iface_cell_value(arr, loc):
""" Returns I face value for cell-centered data. """
i, j, k = loc
# FIXME: built-in ghosts
return 0.5 * (arr(i+1, j+1, k+1) + arr(i, j+1, k+1))
|
def split(n, m, rank=None):
"""
Return an iterator through the slices that partition a list of n elements
in m almost same-size groups. If a rank is provided, only the slice
for the rank is returned.
Example
-------
>>> split(1000, 2)
(slice(0, 500, None), slice(500, 1000, None))
>>> split(1000, 2, 1)
slice(500, 1000, None)
"""
if rank is not None:
work = n // m + ((n % m) > rank)
start = n // m * rank + min(rank, n % m)
return slice(start, start + work)
def generator():
rank = 0
start = 0
while rank < m:
work = n // m + ((n % m) > rank)
yield slice(start, start + work)
start += work
rank += 1
return tuple(generator())
|
def build_profile(first, last, **user_info):
"""Build dict containing everything we know about a user"""
profile = {}
profile['first name'] = first
profile['last name'] = last
for key, value in user_info.items():
profile[key] = value
return profile
|
def fiveplates_field_file(field):
"""
string representation of targets.txt file for field within
fiveplates_field_files zip file.
Parameters
----------
field : str
identifier of field, e.g. 'GG_010'
"""
return f'{field}_targets.txt'
|
def calc_blinds_activation(radiation, g_gl, Rf_sh):
"""
This function calculates the blind operation according to ISO 13790.
:param radiation: radiation in [W/m2]
:param g_gl: window g value
:param Rf_sh: shading factor
"""
    # activate blinds when I > 300 W/m2
if radiation > 300: # in w/m2
return g_gl * Rf_sh
else:
return g_gl
|
def flatten_dict_values(d: dict) -> list:
"""Extract all values from a nested dictionary.
Args:
d: Nested dictionary from which to extract values from
Returns:
All values from the dictionary as a list
"""
if isinstance(d, dict):
flattened = []
for k, v in d.items():
if isinstance(v, dict):
flattened.extend(flatten_dict_values(v))
else:
flattened.append(v)
return flattened
else:
return [d]
|
def to_lower(input_text: str) -> str:
""" Convert input text to lower case """
return input_text.lower()
|
def dectodms(decdegs):
    """Convert Declination in decimal degrees format to degrees, minutes,
    seconds format.
    Keyword arguments:
    decdegs -- Dec. in degrees format
    Return value:
    dec -- tuple of 3 values, (degrees, minutes, seconds)
    """
sign = -1 if decdegs < 0 else 1
decdegs = abs(decdegs)
if decdegs > 90:
raise ValueError("coordinate out of range")
decd = int(decdegs)
decm = int((decdegs - decd) * 60)
decs = (((decdegs - decd) * 60) - decm) * 60
# Necessary because of potential roundoff errors
if decs - 60 > -1e-7:
decm += 1
decs = 0
if decm == 60:
decd += 1
decm = 0
if decd > 90:
raise ValueError("coordinate out of range")
if sign == -1:
if decd == 0:
if decm == 0:
decs = -decs
else:
decm = -decm
else:
decd = -decd
return (decd, decm, decs)
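# Usage sketch (illustrative addition): the sign is carried on the first nonzero
# component, e.g. -23.5 degrees becomes (-23, 30, 0.0).
assert dectodms(-23.5) == (-23, 30, 0.0)
assert dectodms(10.25) == (10, 15, 0.0)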
|
def re_primary_dirname(dirname):
""" Tests if a dirname string, can be matched against just primary. Note
that this is a subset of re_primary_filename(). """
if 'bin/' in dirname:
return True
if dirname.startswith('/etc/'):
return True
return False
|
def FormatDateString(dt, time=True):
"""Formats a DateTime object into a string for display. If dt is not
a DateTime, then the empty string is returned.
Parameters:
* dt - An instance of DateTime.DateTime
* time - If True, displays the time. If False, only the date is
displayed.
Returns:
A formatted time string, or '' if the input cannot be parsed.
"""
if hasattr(dt, 'isoformat'):
datestr = dt.isoformat().replace('T', ' ')
        return datestr[:19 if time else 10]
else:
return ''
|
def stations_by_river(stations):
"""For a list of MonitoringStation objects (stations),
returns a dictionary that maps river names (key) to a list of MonitoringStation objects on a given river."""
# Dictionary containing river names and their corresponding stations
rivers = {}
for station in stations:
# Check if river is already in the dictionary
if station.river in rivers:
# Check if the station has already been added to the list
if station not in rivers[station.river]:
rivers[station.river].append(station)
else:
rivers.update({station.river: [station]})
return rivers
|
def print_path(path):
"""
path is a list of nodes
"""
result = []
for i in range(len(path)):
result.append(str(path[i]))
# if not last path
if i != len(path) - 1:
result.append('->')
return ''.join(result)
|
def default_value(value, default):
"""
Returns a specified default value if the provided value is None.
Args:
value (object): The value.
default (object): The default value to use if value is None.
Returns:
object: Either value, or the default.
"""
return value if value is not None else default
|
def _collect_lines(data):
"""Split lines from data into an array, trimming them """
matches = []
for line in data.splitlines():
match = line.strip()
matches.append(match)
return matches
|
def make_singular(word):
"""Relatively naive/imperfect function to make a word singular
Parameters
----------
word : str
The string to make singular (e.g. 'zebras').
Returns
-------
str
        The string in singular form (e.g. 'zebra').
"""
if not isinstance(word, str) or not word:
return word
word_as_lower = word.casefold()
# Not a plural
if not word_as_lower.endswith('s'):
return word
# Word ends in 's' and is therefore possibly plural
else:
es_endings = ('sses', 'shes', 'ches', 'xes', 'zes')
if word_as_lower.endswith(es_endings):
# Then the word was pluralized by adding 'es'
return word[:-2]
elif word_as_lower.endswith('ss'):
# Then it's probably not a plural, e.g. 'assess' or 'process'
return word
elif len(word) <= 2:
# Then it's probably not a plural, e.g. 'OS'
return word
elif word_as_lower.endswith('sis') or word_as_lower.endswith('us'):
# Then it's probably singular like 'analysis' and 'cactus' and 'focus'
return word
else:
# Assume regular noun pluralization of adding an 's'
return word[:-1]
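# Usage sketch (illustrative addition): a few of the cases handled above.
assert make_singular("zebras") == "zebra"
assert make_singular("boxes") == "box"
assert make_singular("process") == "process"
assert make_singular("analysis") == "analysis"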
|
def c_to_f(temp):
"""Returns Celsius temperature as Fahrenheit"""
return temp * (9/5) + 32
|
def formatPath(input_string):
""" function to correct backslash issues in paths
usage: strPath = ut.formatPath(strPath)
"""
lstReplace = [["\a","/a"],
["\b","/b"],
["\f","/f"],
["\n","/n"],
["\r","/r"],
["\t","/t"],
["\v","/v"],
["\\","/"]]
    # replace each type of escape
for old, new in lstReplace:
input_string = input_string.replace(old, new)
return input_string
|
def makegainstr(gainset):
"""Return a shorted string for the gainsetting"""
if gainset.upper()=='FAINT':
gnstr='FA'
elif gainset.upper()=='BRIGHT':
gnstr='BR'
else:
gnstr=''
return gnstr
|
def nattrs(res):
    """
    Return a fill-color attribute dict based on the result: yellow if res is
    truthy, white otherwise.
    """
    if res:
        return dict(fillcolor='yellow')
    return dict(fillcolor='white')
|
def RPL_INFO(sender, receipient, message):
""" Reply Code 371 """
return "<" + sender + ">: " + message
|
def sieveOfEratosthenes(n):
"""
Given a number n, returns a list of all primes up to n.
"""
values = [x for x in range(2, n+1)]
primes = []
#Any encountered value not marked False is a prime
for i,v in enumerate(values):
if v is not False:
primes.append(v)
#Assign False to every v-th value starting at the next multiple of v
values[i+v::v] = [False] * (((n-1) - (i+1)) // v)
return primes
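# Usage sketch (illustrative addition): primes up to 30.
assert sieveOfEratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]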
|
def write_transient_conv_msg(nMax, totTime):
"""Return the convergence status message for writing to file."""
PrintMsg = f"\nSTATUS: SOLUTION OBTAINED AT\nTIME LEVEL=\
{totTime} s.\nTIME STEPS= {nMax}"
print(PrintMsg)
print()
return PrintMsg
|
def pick(keys, source):
    """Returns a partial copy of an object containing only the keys specified. If
    the key does not exist, the property is ignored"""
    picked_dict = {}
    for k in source.keys():
        if k in keys:
            picked_dict[k] = source[k]
    return picked_dict
|
def generate_route53_records(properties):
"""Return list of AWS::Route53::RecordSet resources required"""
records = []
if properties.get("VerificationToken"):
records.append({
"Name": "_amazonses.{Domain}.".format(**properties),
"Type": "TXT",
"ResourceRecords": ['"{VerificationToken}"'.format(**properties)]})
if properties.get("DkimTokens"):
records.extend([{
"Name": "{token}._domainkey.{Domain}.".format(token=token, **properties),
"Type": "CNAME",
"ResourceRecords": ["{token}.dkim.amazonses.com.".format(token=token)],
} for token in properties["DkimTokens"]])
if properties.get("MailFromDomain"):
if properties.get("MailFromMX"):
records.append({
"Name": "{MailFromDomain}.".format(**properties),
"Type": "MX",
"ResourceRecords": ["10 {MailFromMX}.".format(**properties)]})
if properties.get("MailFromSPF"):
records.append({
"Name": "{MailFromDomain}.".format(**properties),
"Type": "TXT",
"ResourceRecords": [properties["MailFromSPF"]]})
if properties.get("DMARC"):
records.append({
"Name": "_dmarc.{Domain}.".format(**properties),
"Type": "TXT",
"ResourceRecords": [properties["DMARC"]]})
if properties.get("ReceiveMX"):
records.append({
"Name": "{Domain}.".format(**properties),
"Type": "MX",
"ResourceRecords": ["10 {ReceiveMX}.".format(**properties)]})
for record in records:
record["TTL"] = properties["TTL"]
return records
|
def is_valid_medialive_channel_arn(mlive_channel_arn):
"""Determine if the ARN provided is a valid / complete MediaLive Channel ARN"""
if mlive_channel_arn.startswith("arn:aws:medialive:") and "channel" in mlive_channel_arn:
return True
else:
return False
|
def sort_dict(dictionary):
"""Sorts a dictionary with only Integer keys ascending by these keys.
If the dictionary is not valid, i.e. does not exclusively contain Integer keys, an Error is raised.
:param dictionary: The to be sorted dictionary
:return: The sorted dictionary, ascending by Integer keys.
"""
    try:
        sorted_dict = dict(sorted(dictionary.items(), key=lambda x: int(x[0])))
    except (ValueError, AttributeError):
        raise
    else:
        return sorted_dict
|
def _SubtractMemoryStats(end_memory_stats, start_memory_stats):
"""Computes the difference in memory usage stats.
Each of the two stats arguments is a dict with the following format:
{'Browser': {metric: value, ...},
'Renderer': {metric: value, ...},
'Gpu': {metric: value, ...},
'ProcessCount': value,
etc
}
The metrics can be VM, WorkingSetSize, ProportionalSetSize, etc depending on
the platform/test.
NOTE: The only metrics that are not subtracted from original are the *Peak*
memory values.
Returns:
A dict of process type names (Browser, Renderer, etc.) to memory usage
metrics between the end collected stats and the start collected stats.
"""
memory_stats = {}
end_memory_stats = end_memory_stats or {}
start_memory_stats = start_memory_stats or {}
for process_type in end_memory_stats:
memory_stats[process_type] = {}
end_process_memory = end_memory_stats[process_type]
if not end_process_memory:
continue
# If a process has end stats without start stats then report the end stats.
# For example, a GPU process that started just after media playback.
if (process_type not in start_memory_stats or
not start_memory_stats[process_type]):
memory_stats[process_type] = end_process_memory
continue
if not isinstance(end_process_memory, dict):
start_value = start_memory_stats[process_type] or 0
memory_stats[process_type] = end_process_memory - start_value
else:
for metric in end_process_memory:
end_value = end_process_memory[metric]
start_value = start_memory_stats[process_type].get(metric, 0)
if 'Peak' in metric:
memory_stats[process_type][metric] = end_value
else:
memory_stats[process_type][metric] = end_value - start_value
return memory_stats
|
def getDigits(num,baseNum,digitsList):
"""
Input: num, baseNum, and digitsList must all come from base(num,base) as is
currently specified.
Output: returns each digit in the output number of base(num,base).
"""
tmpList=[]
for x in digitsList:
if x*(baseNum)>num:
tmpList.append(x)
return max((set(digitsList)-set(tmpList)))
|
def escape_tag(string: str):
    """
    Escape `<` and `>` by replacing them with `&lt;` and `&gt;`
    """
    return string.replace("<", "&lt;").replace(">", "&gt;")
|
def is_report(post_site_id):
"""
Checks if a post is a report
:param post_site_id: Report to check
:return: Boolean stating if it is a report
"""
if post_site_id is None:
return False
return True
|
def remove_zeros(mylist):
"""
Creates a new list which is equal to the input list but without zeros.
@param mylist: The array to delete zeros.
@return: The list without zeros.
"""
myretlist = []
for elem in mylist:
if elem!=0:
myretlist.append(elem)
return myretlist
|
def length_lt(value, arg):
"""Returns a boolean of whether the value's length is less than the
argument.
"""
return len(value) < int(arg)
|
def rfl_to_mmh(z, a=256.0, b=1.42):
"""
wradlib.zr.z2r function
"""
return (z / a) ** (1.0 / b)
|
def parse_url(url):
"""Return the inclusion method and test_url part of a url.
Example url: http://192.168.2.148:8001/apg/iframe/?url=https://192.168.2.148:44300/leaks/10643/noauth/&browser=chrome&version=90.0.4430.85&wait_time=500
Example result: ("iframe", "https://192.168.2.148:44300/leaks/10643/noauth/")
"""
parts = url.split("?")
inc_method = parts[0].split("/")[-2:-1][0]
test_url = parts[1].split("=")[1].split("&")[0]
return inc_method, test_url
|
def sqrt(x):
"""
Calculate the square root of argument x.
"""
# Check that x is positive
if x < 0:
print("Error: negative value supplied")
return -1
else:
print("Here we go again...")
# Initial guess for the square root.
z = x / 2.0
# Continuously improve the guess.
# Adapted from https://tour.golang.org/flowcontrol/8
while abs(x - (z*z)) > 0.0000001:
z = z - (((z * z) - x) / (2 * z))
return z
|
def detect_internals(pstart, pend, hit_start, hit_end):
"""[Local] Check for internal features (for 'genomic_location' column, independent of users key option) """
feat_in_peak = (pstart < hit_start and hit_end < pend)
peak_in_feat = (hit_start < pstart and pend < hit_end)
if feat_in_peak:
return "FeatureInsidePeak"
elif peak_in_feat:
return "PeakInsideFeature"
else:
return "not.specified"
|
def is_condition_key_match(document_key, policy_key):
    """ Given a documented condition key and one from a policy, determine if they match
    Examples:
    - s3:prefix and s3:prefix obviously match
    - s3:ExistingObjectTag/<key> and s3:ExistingObjectTag/backup match
    """
    # Normalize both
    document_key = document_key.lower()
    policy_key = policy_key.lower()
    # Check if the document key has a pattern match in it
    if "$" in document_key:
        # Some services use a format like license-manager:ResourceTag/${TagKey}
        if policy_key.startswith(document_key.split("$")[0]):
            return True
    elif "<" in document_key:
        # Some services use a format like s3:ExistingObjectTag/<key>
        if policy_key.startswith(document_key.split("<")[0]):
            return True
    elif "tag-key" in document_key:
        # Some services use a format like secretsmanager:ResourceTag/tag-key
        if policy_key.startswith(document_key.split("tag-key")[0]):
            return True
    # Just return whether they match exactly
    return document_key == policy_key
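# Usage sketch (illustrative addition): documented keys with <key>, ${TagKey} or
# tag-key placeholders match any concrete suffix; otherwise an exact match is needed.
assert is_condition_key_match("s3:prefix", "s3:prefix") is True
assert is_condition_key_match("s3:ExistingObjectTag/<key>", "s3:ExistingObjectTag/backup") is True
assert is_condition_key_match("license-manager:ResourceTag/${TagKey}", "license-manager:resourcetag/team") is True
assert is_condition_key_match("s3:prefix", "s3:delimiter") is False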
|
def is_not_nullprice(data):
"""
used by the filter to filter out the null entries
:param data: input data
:return: true if it's not null, false if null
"""
return data and data["MinimumPrice"] != None
|
def _extract_from_dict(dictionary, key, default=None):
"""Extract a given key from a dictionary. If the key doesn't exist
and a default parameter has been set, return that.
"""
try:
value = dictionary[key]
del dictionary[key]
except KeyError:
value = default
return value
|
def replace_constants(fragments):
"""Replace ARG1 constants with referents introduced by constant clauses."""
constant_ref_map = {
c[1]: c[2]
for f in fragments
for c in f
if len(c) == 3
and c[1].startswith('"')
and c[1].endswith('"')
and c[1] not in ('"?"',)
}
def replace(clause):
if len(clause) == 4 and clause[3] in constant_ref_map:
clause = (clause[0], clause[1], clause[2], constant_ref_map[clause[3]])
return clause
return tuple(tuple(replace(c) for c in f) for f in fragments)
|