content
stringlengths 42
6.51k
|
|---|
def count_dict_dups(dictionary):
    """dict -> int
    Return the number of distinct values that occur more than once.
    >>> count_dict_dups({'red': 1, 'blue': 2, 'green': 1, 'orange': 1})
    1
    >>> count_dict_dups({'a': 2, 'b': 2, 'c': 4, 'd': 4})
    2
    """
    # BUG FIX (docs): the old docstring claimed a *set* was returned, but the
    # function has always returned the count of duplicated values.
    seen_values = set()
    duplicates = set()
    for value in dictionary.values():
        if value in seen_values:
            duplicates.add(value)
        else:
            seen_values.add(value)
    return len(duplicates)
|
def Re(F_mass, z_way, d_inner, n_pipe, mu_mix):
    """
    Calculates the Reynold criterion.
    Parameters
    ----------
    F_mass : float
        The mass flow rate of feed [kg/s]
    z_way : float
        The number of ways in heat exchanger [dimensionless]
    d_inner : float
        The diametr of inner pipe, [m]
    n_pipe : float
        The number of pipes in heat exchanger, [dimensionless]
    mu_mix : float
        The mix viscocity of liquid, [Pa/s]
    Returns
    -------
    Re : float
        The Reynold criterion, [dimensionless]
    """
    numerator = 0.785 * F_mass * z_way
    denominator = d_inner * n_pipe * mu_mix
    return numerator / denominator
|
def build_project(project_name):
    """Builds eclipse project file.
    Uses a very simple template to generate an eclipse .project file
    with a configurable project name.
    Args:
        project_name: Name of the eclipse project. When importing the project
            into an eclipse workspace, this is the name that will be shown.
    Returns:
        Contents of the eclipse .project file.
    """
    # The template declares both the PyDev and Java builders/natures, so the
    # generated project opens as a mixed Python/Java eclipse project.
    template = """<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
  <name>{project_name}</name>
  <comment>
  </comment>
  <projects>
  </projects>
  <buildSpec>
    <buildCommand>
      <name>org.python.pydev.PyDevBuilder</name>
      <arguments>
      </arguments>
    </buildCommand>
    <buildCommand>
      <name>org.eclipse.jdt.core.javabuilder</name>
      <arguments>
      </arguments>
    </buildCommand>
  </buildSpec>
  <natures>
    <nature>org.eclipse.jdt.core.javanature</nature>
    <nature>org.python.pydev.pythonNature</nature>
  </natures>
</projectDescription>"""
    return template.format(project_name=project_name)
|
def indent(string, prefix=" "):
    """Prefix every line of *string* (including empty lines) with *prefix*."""
    return "\n".join(prefix + line for line in string.split("\n"))
|
def build_aggregation(facet_name, facet_options, min_doc_count=0):
    """Specify an elasticsearch aggregation from schema facet configuration.

    Args:
        facet_name: facet name; mapped to field 'embedded.<facet_name>'
            unless it is 'type' or starts with 'audit'.
        facet_options: facet configuration dict; key 'type' selects the
            aggregation kind ('terms', 'typeahead' or 'exists'), and
            'length' == 'long' raises the bucket size from 200 to 3000.
        min_doc_count: minimum document count for 'terms' buckets.

    Returns:
        (agg_name, agg): aggregation name (dots replaced by dashes, since
        elasticsearch aggregation names may not contain dots) and the
        aggregation body.

    Raises:
        ValueError: if the facet type is not recognized.
    """
    exclude = []
    # Resolve the document field that backs this facet.
    if facet_name == 'type':
        field = 'embedded.@type'
        exclude = ['Item']  # the generic 'Item' type would match every doc
    elif facet_name.startswith('audit'):
        field = facet_name
    else:
        field = 'embedded.' + facet_name
    agg_name = facet_name.replace('.', '-')
    facet_type = facet_options.get('type', 'terms')
    facet_length = 200
    if facet_options.get('length') == 'long':
        facet_length = 3000
    if facet_type in ['terms', 'typeahead']:
        agg = {
            'terms': {
                'field': field,
                'min_doc_count': min_doc_count,
                'size': facet_length,
            },
        }
        if exclude:
            agg['terms']['exclude'] = exclude
    elif facet_type == 'exists':
        # Two filter buckets: docs where the field exists ('yes') and
        # docs where it does not ('no').
        agg = {
            'filters': {
                'filters': {
                    'yes': {
                        'bool': {
                            'must': {
                                'exists': {'field': field}
                            }
                        }
                    },
                    'no': {
                        'bool': {
                            'must_not': {
                                'exists': {'field': field}
                            }
                        }
                    },
                },
            },
        }
    else:
        raise ValueError('Unrecognized facet type {} for {} facet'.format(
            facet_type, field))
    return agg_name, agg
|
def map_list_to_kwargs(file_names: list, mapping: dict):
    """
    Maps a list of files to their corresponding *mrconvert* inputs kwargs.
    Parameters
    ----------
    file_names : list
        A list of existing files.
    mapping : dict
        Maps each kwarg name to the collection of file suffixes that
        belong to it (e.g. {'json': ('json',), 'in_file': ('nii.gz',)}).
    Returns
    -------
    dict
        Maps each matched kwarg name to a file name. When several files
        share a matching suffix, the last one in ``file_names`` wins.
    """
    from pathlib import Path
    out_dict = {}
    for file_name in file_names:
        # Build the full compound suffix without the leading dot,
        # e.g. 'x.nii.gz' -> 'nii.gz'.
        suffixes = Path(file_name).suffixes
        suffix = "".join(suffixes).lstrip(".")
        for key, val in mapping.items():
            if suffix in val:
                out_dict[key] = file_name
    return out_dict
|
def translate(c):
    """Map upper case characters to a visible 'X', newlines to themselves,
    and everything else to an (invisible) space."""
    if c == '\n':
        return c
    if c.isupper():
        return 'X'
    return ' '
|
def f1_acc(FN, TN, TP, FP):
    """
    Combine accuracy and F1 from confusion-matrix counts.
    Returns:
        0.5 * f1measure + 0.5 * accuracy
    """
    accuracy = 1.0 * (TP + TN) / (TP + TN + FP + FN)
    precision = 1.0 * TP / (TP + FP) if TP > 0 else 0
    recall = 1.0 * TP / (TP + FN) if TP > 0 else 0
    if precision > 0 and recall > 0:
        f1 = 2.0 * precision * recall / (precision + recall)
    else:
        f1 = 0
    return 0.5 * accuracy + 0.5 * f1
|
def calc_ios(bbox_1, bbox_2):
    """Calculate intersection over small ratio.

    This is a variant of the more commonly used IoU (intersection over
    union) metric. All coordinates are in the order (ymin, xmin, ymax, xmax).

    Args:
        bbox_1: first box as (ymin, xmin, ymax, xmax).
        bbox_2: second box as (ymin, xmin, ymax, xmax).

    Returns:
        float: intersection area divided by the smaller box's area;
        0.0 when the smaller box has zero area.
    """
    def cal_area(bbox):
        # Area of a (y_min, x_min, y_max, x_max) box; empty/inverted boxes
        # clamp to 0.
        return max(bbox[2] - bbox[0], 0) * max(bbox[3] - bbox[1], 0)

    ymin_1, xmin_1, ymax_1, xmax_1 = bbox_1
    ymin_2, xmin_2, ymax_2, xmax_2 = bbox_2
    x_min = max(xmin_1, xmin_2)
    y_min = max(ymin_1, ymin_2)
    x_max = min(xmax_1, xmax_2)
    y_max = min(ymax_1, ymax_2)
    area_intersection = cal_area([y_min, x_min, y_max, x_max])
    area_small = min(cal_area(bbox_1), cal_area(bbox_2))
    # BUG FIX: guard the division — a degenerate (zero-area) box used to
    # raise ZeroDivisionError here.
    if area_small == 0:
        return 0.0
    return area_intersection / area_small
|
def calc_delta_shift(H, N, Href, Nref, f=5):
    """Find combined chem shift (weighted Euclidean distance of H/N shifts)."""
    import math
    dh = f * (H - Href)
    dn = N - Nref
    return math.sqrt(dh ** 2 + dn ** 2)
|
def _str_to_list(s):
"""Converts a comma separated string to a list"""
_list = s.split(",")
return list(map(lambda i: i.lstrip(), _list))
|
def month(num):
    """
    Validate the user's birth month and return it as an int.
    Re-prompts while the entered month is greater than 12.

    Args:
        num: month number (int or numeric string).

    Returns:
        int: a month number <= 12.
    """
    # BUG FIX: the original prompted again on invalid input but discarded
    # the re-entered value and returned the invalid month anyway.
    while int(num) > 12:
        print("Error! Enter the month number in the range 1 to 12:")
        num = input("Enter the number of your birth month. For example, July would be 7:")
    return int(num)
|
def contig_lengths(contigs):
    """Return the number of contigs longer than each threshold (1k/10k/100k/1m)."""
    thresholds = (('1k', 1000), ('10k', 10000), ('100k', 100000), ('1m', 1000000))
    return {
        label: sum(1 for length in contigs if length > cutoff)
        for label, cutoff in thresholds
    }
|
def _get_early_stopping_params(early_stopping):
"""Set default parameters if not already set in input.
"""
if early_stopping is not None:
if early_stopping['type'] == 'cost':
if 'threshold' not in early_stopping:
early_stopping['threshold'] = 0.005
if 'length_mean' not in early_stopping:
early_stopping['length_mean'] = 5
if 'length_ratio_plateau' not in early_stopping:
early_stopping['length_ratio_plateau'] = 2
elif early_stopping['type'] == 'h_norm':
if 'threshold' not in early_stopping:
early_stopping['threshold'] = 0.01
if 'length_mean' not in early_stopping:
early_stopping['length_mean'] = 5
if 'length_ratio_plateau' not in early_stopping:
early_stopping['length_ratio_plateau'] = 1
else:
raise ValueError('Cannot find match of early stopping method.')
if 'min_iter' not in early_stopping:
early_stopping['min_iter'] = 250
return early_stopping
|
def meters_to_feet(n):
    """
    Convert a length from meters to feet.
    """
    feet_per_meter = 3.28084
    return float(n) * feet_per_meter
|
def define_orderers(orderer_names, orderer_hosts, domain=None):
    """Define orderers as connection objects.
    Args:
        orderer_names (Iterable): List of orderer names.
        orderer_hosts (Iterable): List of orderer hosts.
        domain (str): Domain used. Defaults to none.
    Returns:
        dict: A dictionary of Orderer Connections
    """
    suffix = ".{}".format(domain) if domain else ""
    return {
        name + suffix: {"url": "grpc://" + host + ":7050"}
        for name, host in zip(orderer_names, orderer_hosts)
    }
|
def get_clusterID(filename):
    """given a file name return the cluster id (text before the first dot)"""
    cluster_id, _, _ = filename.partition(".")
    return cluster_id
|
def split_multiline(value):
    """Split a multiline string into a list, excluding blank lines."""
    stripped_lines = (raw.strip() for raw in value.split('\n'))
    return [line for line in stripped_lines if line]
|
def user_name_to_file_name(user_name):
    """
    Provides a standard way of going from a user_name to something that will
    be unique (should be ...) for files
    NOTE: NO extensions are added
    See Also:
        utils.get_save_root
    """
    # Drop periods from the email-style user name; every other character
    # is kept as-is. Good enough for now.
    return user_name.translate({ord('.'): None})
|
def best_calc_method(in_dict, maximum):
    """ Pick the best calc method(s) by score.
    Parameters
    ----------
    in_dict: keys => calc methods, values => results from
        tests of each calc method (chi2 goodness of fit etc)
    maximum: True => select methods with the maximum value,
        False => select methods with the minimum value
    Returns
    -------
    list of the calc methods whose value equals the max (or min)
    """
    pick = max if maximum else min
    target = pick(in_dict.values())
    return [method for method, score in in_dict.items() if score == target]
|
def as_segments(polyline):
    """Returns for a polyline a list of segments (start/end point)"""
    return list(zip(polyline, polyline[1:]))
|
def scan_critical(fname):
    """ Find critical path and Fmax from VPR log (if any).

    Parameters
    ----------
    fname : str
        Path to the VPR log file.

    Returns
    -------
    critical_path : str
        Critical path delay in nsec.
    fmax : str
        Fmax in MHz.
    critical_path_geomean : str
        Geomean non-virtual intra-domain period ("N/A" when nan).
    fmax_geomean : str
        Corresponding Fmax ("N/A" when nan).
        All four are empty strings when the file does not exist.
    """
    try:
        with open(fname, 'r') as f:
            final_cpd = 0.0
            final_fmax = 0.0
            final_cpd_geomean = 0.0
            final_fmax_geomean = 0.0
            for line in f:
                if line.startswith('Final critical path delay'):
                    parts = line.split()
                    if len(parts) >= 9:
                        # Final critical path delay (least slack): 16.8182 ns, Fmax: 59.4592 MHz
                        final_cpd = float(parts[6])
                        final_fmax = float(parts[9])
                    elif len(parts) == 8 and parts[7].strip() == 'ns':
                        # Final critical path delay (least slack): 17.9735 ns
                        final_cpd = float(parts[6])
                        final_fmax = 1000. / final_cpd
                if line.startswith(
                        'Final geomean non-virtual intra-domain period'):
                    parts = line.split()
                    final_cpd_geomean = parts[5]
                    if final_cpd_geomean == "nan":
                        final_cpd_geomean = "N/A"
                        final_fmax_geomean = "N/A"
                        continue
                    final_cpd_geomean = float(parts[5])
                    final_fmax_geomean = 1000. / final_cpd_geomean
            return str(final_cpd), str(final_fmax), str(
                final_cpd_geomean
            ), str(final_fmax_geomean)
    except FileNotFoundError:
        pass
    # BUG FIX: the missing-file path used to return a 2-tuple while the
    # success path returns 4 values, breaking any caller that unpacks.
    return "", "", "", ""
|
def get_channel_from_filename(data_filename):
    """
    Extract the channel number from a name like 'channel_3.dat'.

    Args:
        data_filename (str): file name of the form 'channel_<int>.dat'.

    Returns:
        int: the channel number.

    Raises:
        ValueError: if the remaining text is not an integer.
    """
    # BUG FIX: the original used lstrip('channel_') / rstrip('.dat'), which
    # strip *sets of characters*, not literal prefixes/suffixes, and can eat
    # too many characters for some file names.
    channel_str = data_filename
    if channel_str.startswith('channel_'):
        channel_str = channel_str[len('channel_'):]
    if channel_str.endswith('.dat'):
        channel_str = channel_str[:-len('.dat')]
    return int(channel_str)
|
def AnimateNameCheck(animate_name):
    """
    Make sure the file name is valid by removing the characters
    '/', ':' and '?'.

    Args:
        animate_name (str): proposed file name.

    Returns:
        str: the name with all forbidden characters removed.
    """
    # IDIOM: one C-level translate pass replaces the original three
    # redundant find()+replace() scans (find was unnecessary — replace on a
    # missing character is already a no-op).
    return animate_name.translate({ord(c): None for c in '/:?'})
|
def methodArgs(item):
    """Returns a dictionary formatted as a string given the arguments in item.
    Args:
        item: dictionary containing key 'args' mapping to a list of strings
    Returns:
        dictionary formatted as a string, suitable for printing as a value
    """
    pairs = ("'{0}': {0}".format(arg) for arg in item['args'])
    return '{' + ', '.join(pairs) + '}'
|
def _append_to_final_data(final_data: dict, raw_data: dict, sample: dict):
"""Parsing raw data. Appending to final_data"""
for key, val in raw_data.items():
if key in final_data.keys():
category = f"{sample['category']}_{'_'.join(sample['workflows'])}"
if category not in final_data[key].keys():
final_data[key][category] = []
final_data[key][category].append(
{
"name": sample["_id"],
"application": sample["category"],
"workflows": ",".join(sample["workflows"]),
"x": sample["month"],
"y": val,
}
)
return final_data
|
def _build_selector(selectors, separator):
"""
Build a selector defined by the selector and separator
:param selectors: a list of strings that are selector
:param separator: the char that separates the different selectors
:return: the resulting selector
"""
selector = ""
for i, sel in enumerate(selectors):
if i != 0:
selector += separator
selector += "$" + sel.replace(" ", "%20")
return selector
|
def indent_block(s, amt=1):
    """
    Intend the lines of a string.
    :param s: The string
    :param amt: The amount, how deep the string block should be intended (default 1)
    :return: The indented string
    """
    indent_string = " "
    res = s
    # Each pass prepends indent_string to the first line and (via join on
    # splitlines(True), which keeps the line endings) to every following line.
    for i in range(0, amt):
        res = indent_string + indent_string.join(res.splitlines(True))
    return res
|
def normalize(vector):
    """ Reduce each entry of *vector* to its sign (1, -1 or 0).
    Returns a tuple."""
    def sign(value):
        if value > 0:
            return 1
        if value < 0:
            return -1
        return 0
    return tuple(sign(entry) for entry in vector)
|
def bounds_overlap(*bounds):
    """Return True when all (min_x, min_y, max_x, max_y) bounds strictly overlap."""
    # Transpose: collect the same component across every bounds tuple.
    min_xs, min_ys, max_xs, max_ys = zip(*bounds)
    x_overlap = max(min_xs) < min(max_xs)
    y_overlap = max(min_ys) < min(max_ys)
    return x_overlap and y_overlap
|
def is_list_empty(in_list):
    """ Code from stack overflow: https://stackoverflow.com/a/1605679
    True only for a (possibly nested) list whose leaves are all empty lists."""
    if not isinstance(in_list, list):
        return False
    return all(is_list_empty(item) for item in in_list)
|
def key_by_path_dict(in_dict, path):
    """ Path-based dictionary lookup.
    Args:
        in_dict (dict): Input dict.
        path (str): Full path to the key (key.subkey1.subkey2).
    Returns:
        Value for key at ``path`` if found in ``in_dict``, else None.
    >>> key_by_path_dict(
    ...     {'key1': {'subkey1': {'subkey2': 42}}},
    ...     'key1.subkey1.subkey2'
    ... )
    42
    >>> key_by_path_dict(
    ...     {'key1': {'subkey1': {'subkey2': 42}}},
    ...     'key1.subkey2.subkey1'
    ... ) is None
    True
    """
    current = in_dict
    for part in path.split('.'):
        if not (isinstance(current, dict) and part in current):
            return None
        current = current[part]
    return current
|
def reformat_seq(res_list):
    """Joins list of characters and removes gaps e.g. ['T', 'V', '-', 'Y'] -> 'TVY'"""
    joined = ''.join(res_list)
    return joined.replace('-', '')
|
def _py_single_source_complete_paths(source, N, paths):
    """
    From the dict of node parent paths, recursively return the complete path from the source to all targets
    Args:
        source (int/string): the source node.
        N (dict): the set of nodes in the network.
        paths (dict): a dict of nodes and their distance to the parent node.
    Returns:
        path (dict): the complete path between source and all other nodes, including source and target in the list.
    """
    def __py__get_path_recursive(plist, n, source):
        # Walk parent links (paths[n][1]) from n back toward source,
        # appending each visited node to plist.
        if n != source:
            plist.append(n)
            try:
                n = paths[n][1]
            except:
                # NOTE(review): bare except silently ends the walk when n has
                # no parent entry; consider narrowing to (KeyError, IndexError).
                pass
            else:
                __py__get_path_recursive(plist, n, source)
        return plist
    complete_paths = {}
    for n in N:
        # Build the reversed path n -> ... -> source, then flip it so the
        # result runs source -> ... -> n.
        plist = __py__get_path_recursive([], n, source)
        plist.append(source)
        plist.reverse()
        complete_paths[n] = plist
    return complete_paths
|
def XlaLaunchOpCount(labels):
    """Count how many XlaLaunch labels are present."""
    return sum(1 for label in labels if "XlaLaunch(" in label)
|
def index_map(array):
    """Given an array, returns a dictionary that allows quick access to the
    indices at which a given value occurs.
    Example usage:
    >>> by_index = index_map([3, 5, 5, 7, 3])
    >>> by_index[3]
    [0, 4]
    >>> by_index[5]
    [1, 2]
    >>> by_index[7]
    [3]
    """
    positions = {}
    for idx, item in enumerate(array):
        if item in positions:
            positions[item].append(idx)
        else:
            positions[item] = [idx]
    return positions
|
def cell_background_test(val, max):
    """Creates the CSS code for a cell with a certain value to create a heatmap effect

    Args:
        val: the value of the cell; non-numeric values (e.g. text or dates)
            get a white, fully opaque background.
        max: the value mapped to full opacity.

    Returns:
        str: a ``background: rgba(...)`` CSS declaration.
    """
    opacity = 0
    try:
        v = abs(val)
        color = '193, 57, 43'
        # Pairs of (fraction-of-max threshold, opacity unlocked at it).
        value_table = [[0, 0],
                       [0.25, 0.25],
                       [0.50, 0.50],
                       [0.75, 0.75],
                       [1, 1]]
        for threshold, level in value_table:
            if v >= round(threshold * max):
                opacity = level
    # BUG FIX: narrowed the original bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to the TypeError raised by abs()/
    # arithmetic on non-numeric cells.
    except TypeError:
        # give cells with eg. text or dates a white background
        color = '255,255,255'
        opacity = 1
    return f'background: rgba({color}, {opacity})'
|
def move_vowels(x):
    """Removes (lowercase) vowels from a single word and places them,
    in order, at the end of the new word."""
    consonants = []
    vowels = []
    for ch in x:
        (vowels if ch in 'aeiou' else consonants).append(ch)
    return ''.join(consonants + vowels)
|
def fixed_XOR(hex_bytes1: bytes, hex_bytes2: bytes) -> bytes:
    """ Compute the XOR combination of hex_bytes1 and hex_bytes2, two
    equal-length buffers.
    """
    if len(hex_bytes1) != len(hex_bytes2):
        raise ValueError("The two buffers need to be of equal-length")
    paired = zip(hex_bytes1, hex_bytes2)
    return bytes(a ^ b for a, b in paired)
|
def property_names(cls):
    """Get the properties of a class.
    Parameters
    ----------
    cls : type
        An arbitrary class.
    Returns
    -------
    List[str]
        A list of the properties defined by the input class.
    """
    found = []
    for attr in dir(cls):
        if isinstance(getattr(cls, attr), property):
            found.append(attr)
    return found
|
def square_area(side):
    """Returns the area of a square; raises ValueError for negative sides."""
    value = float(side)
    if value < 0.0:
        raise ValueError('Negative numbers are not allowed')
    return value ** 2.0
|
def is_string(arg):
    """
    purpose:
        check is the arg is a string
    arguments:
        arg: varies
    return value: Boolean
    """
    # NOTE(review): despite the stated purpose, this does NOT check
    # isinstance(arg, str) — str(arg) succeeds for nearly every Python
    # object, so almost any non-None value returns True. Confirm whether
    # callers rely on this lax behavior before tightening it.
    if arg is None:
        return False
    try:
        str(arg)
        return True
    except Exception:
        return False
|
def normalize_bcast_dims(*shapes):
    """
    Normalize the lengths of the input shapes to have the same length.
    The shapes are padded at the front by 1 to make the lengths equal.
    """
    target_len = max(len(shape) for shape in shapes)
    return [
        [1] * (target_len - len(shape)) + list(shape)
        for shape in shapes
    ]
|
def bubble_sort(alist):
    """Implement a bubble sort (in place); returns the same list sorted."""
    swapped = True
    while swapped:
        swapped = False
        for pos in range(len(alist) - 1):
            if alist[pos] > alist[pos + 1]:
                alist[pos], alist[pos + 1] = alist[pos + 1], alist[pos]
                swapped = True
    return alist
|
def find_name(function):
    """
    Given a function string in the proper format, traces out the name of the
    function (everything before the first '('); returns None when no '('.
    >>> find_name("f(a,b,c) = a+b+c")
    'f'
    >>> find_name("apple(a,e,g) = ~a")
    'apple'
    """
    head, paren, _ = function.partition("(")
    if paren:
        return head
|
def count_syllables_in_word(word):
    """ This function takes a word in the form of a string
    and returns the number of syllables. Note this function is
    a heuristic and may not be 100% accurate.
    """
    punctuation = '.,;!?:-'
    # Drop one trailing punctuation character if present.
    stripped = word[:-1] if word[-1] in punctuation else word
    if len(stripped) <= 3:
        return 1
    # A final (silent) 'e' does not count as a syllable.
    if stripped[-1] in 'eE':
        stripped = stripped[:-1]
    vowels = "aeiouAEIOU"
    count = 0
    in_vowel_run = False
    # Count runs of consecutive vowels as one syllable each.
    for ch in stripped:
        if ch in vowels:
            if not in_vowel_run:
                count += 1
            in_vowel_run = True
        else:
            in_vowel_run = False
    # A trailing 'y' adds a syllable (e.g. "happy").
    if stripped[-1] in 'yY':
        count += 1
    return count
|
def create_magic_packet(macaddress):
    """
    Create a magic packet.
    A magic packet is a packet that can be used with the wake-on-lan
    protocol to wake up a computer. The packet is constructed from the
    mac address given as a parameter.
    Args:
        macaddress (str): the mac address that should be parsed into a
            magic packet.
    """
    cleaned = macaddress
    if len(cleaned) == 17:
        # A 17-char address carries a separator at position 2 (e.g. ':' or '-').
        cleaned = cleaned.replace(cleaned[2], "")
    elif len(cleaned) != 12:
        raise ValueError("Incorrect MAC address format")
    return bytes.fromhex("F" * 12 + cleaned * 16)
|
def wrap_text(new_text, width, f, old_text = [], start_new_line = False, return_height = False, line_height = None):
    """
    Break a string into lines on a screen.
    Parameters:
        new_text: words to convert to lines; if list or tuple, each element is
            a list of words from a paragraph, with line breaks automatically
            following an element; if str, split() is used to make into
            paragraphs and then individual words; if new_text evaluates to
            False in a Boolean context, a blank line is returned
        width: maximum pixels per line (width > 0).
        f: the font object to use (must provide size() and get_linesize()).
    Keyword Parameters:
        return_height: whether the height of the rendered lines is returned;
            defaults to False.
        old_text: optional list of lines to which new_text is added; each
            element in old_text is assumed to be a single line; its legality
            given width is not checked; defaults to an empty list.
            NOTE(review): mutable default argument — safe only as long as
            the list is never mutated in place; this function copies it.
        start_new_line: only applies if old_text evaluates to True in a
            Boolean context; indicates whether a new line should start (i.e.,
            whether new_text should begin a new line); defaults to False.
        line_height: only applies if return_height is True; denotes the pixels
            interpolated between lines of text; if not set but line_height is
            needed, it is obtained from f.
    Returns:
        lines: old_text with new_text added.
        optionally:
            height: the height of lines when rendered with f.
    """
    # Branch depending on whether additional text or a blank line is being
    # added:
    if new_text:
        # If new_text is a string, it needs to be converted to a list.
        try:
            # Get paragraphs:
            new_text_as_paragraphs = new_text.split("\n")
            # Overwrite new_text:
            new_text = []
            for paragraph in new_text_as_paragraphs:
                paragraph_list = paragraph.split(" ")
                new_text.append(paragraph_list)
        except AttributeError:
            # new_text is already a list.
            pass
        new_lines = list(old_text)
        # Check if a last line from old_text is needed:
        if old_text and not start_new_line:
            line = old_text[-1]
            # Delete line from new_lines:
            del new_lines[-1]
        else:
            line = ""
        # Set line width:
        line_width = f.size(line)[0]
        # Fill new_lines paragraph by paragraph:
        for paragraph in new_text:
            # Fill each line word by word:
            for word in paragraph:
                # Unless line is currently empty, a leading space is needed
                # when calculating word's width.
                if line:
                    word_width = f.size(" "+word)[0]
                else:
                    word_width = f.size(word)[0]
                line_width = line_width+word_width
                if line_width < width:
                    # word fits on this line.
                    if line:
                        line = line+" "+word
                    else:
                        line = word
                elif line_width == width:
                    # word fits, but no more words will.
                    line = line+" "+word
                    new_lines.append(line)
                    line= ""
                    line_width = 0
                else:
                    # word doesn't fit.
                    new_lines.append(line)
                    line = word
                    word_width = f.size(word)[0]
                    line_width = word_width
            # Some part of a line might be left.
            if line:
                new_lines.append(line)
                line = ""
                line_width = 0
    else:
        # A blank line is being added to old_text.
        new_lines = list(old_text)
        new_lines.append("")
    # Check if height is calculated:
    if return_height:
        # Check if line_height needs to be set:
        if not line_height:
            line_height = f.get_linesize()
        # height_of_strings is defined elsewhere in this module.
        height = height_of_strings(new_lines, f, line_height)
        return new_lines, height
    return new_lines
|
def get_task_parameter(task_parameters, name):
    """Get task parameter.
    Args:
        task_parameters (list): task parameters.
        name (str): parameter name.
    Returns:
        The first parameter dict whose 'name' equals *name*, else None.
    """
    return next(
        (param for param in task_parameters if param.get('name') == name),
        None,
    )
|
def try_to_access_dict(base_dct, *keys, **kwargs):
    """
    A helper method that accesses base_dct using keys, one-by-one. Returns None if a key does not exist.
    :param base_dct: dict, a dictionary
    :param keys: str, int, or other valid dict keys
    :param kwargs: can specify default using kwarg default_return=0, for example.
    :return: obj, base_dct[key1][key2][key3]... or None if a key is not in the dictionary
    """
    fallback = None
    # Any keyword argument's value becomes the fallback (last one wins).
    for value in kwargs.values():
        fallback = value
    node = base_dct
    try:
        for key in keys:
            node = node[key]
    # KeyError: string keys; IndexError: array indices;
    # TypeError: node is not subscriptable.
    except (KeyError, IndexError, TypeError):
        return fallback
    return node
|
def class_if_errors(error_list, classname):
    """
    Simple tool: return *classname* if any entry of *error_list* is a
    non-empty dict, otherwise return the empty string.
    >>> class_if_errors([{}, {}], "class")
    ''
    >>> class_if_errors([{1: 1}, {}], "class")
    'class'
    """
    if any(el != {} for el in error_list):
        return classname
    return ""
|
def is_sequence(numberlist: list) -> bool:
    """Is sequence
    Can take a list returned by :meth:`get_numbers` and determine if
    it is a sequence based on the property
    ``list_length == (last_element - first_element + 1)``.
    Args:
        numberlist: List containing integers to check for a sequence.
    Returns:
        True if list contains a sequence of numbers, False otherwise.
    """
    first, last = numberlist[0], numberlist[-1]
    expected_length = last - first + 1
    return len(numberlist) == expected_length
|
def merge_fields(base, attach=None):
    """Merge *attach* into *base* in place and return *base*.

    >>> merge_fields({'a': 1})
    {'a': 1}
    >>> merge_fields({'a': 1}, {'b': 2})
    {'a': 1, 'b': 2}
    """
    # BUG FIX (idiom): replaced the mutable default argument `attach={}` —
    # a shared-instance anti-pattern — with a None sentinel. Updating with
    # an empty/absent mapping was and remains a no-op.
    if attach:
        base.update(attach)
    return base
|
def valToIndexMap(arr):
    """
    A map from a value to the index at which it occurs
    (for duplicates, the last occurrence wins).
    """
    return {value: index for index, value in enumerate(arr)}
|
def describe_humidity(humidity):
    """Convert relative humidity into good/bad description."""
    in_comfort_band = 40 < humidity < 60
    return "good" if in_comfort_band else "bad"
|
def get(name):
    """
    Returns a selection by name; this can be a function or a class name. In case of a class name an instance of that class is returned.
    Parameters
    -----------
    name: str
        The name of the function or class.
    Returns
    --------
    out: function or instance of `Selection`.
    """
    import inspect
    resolved = globals()[name]
    if inspect.isclass(resolved):
        return resolved()
    return resolved
|
def public_url(method: str) -> str:
    """Return the public URL path for a given method."""
    return f"public/{method}"
|
def adj(a):
    """
    Calculates the adjugate of A via just swapping the values.
    The formula is [[a,b],[c,d]] => [[d,-b],[-c,a]]
    :param a: the matrix A (2x2)
    :return: the adjugate as a new 2x2 nested list
    """
    return [[a[1][1], -a[0][1]],
            [-a[1][0], a[0][0]]]
|
def gen_comment_id(reddit_obj_id):
    """
    Generates the Elasticsearch document id for a comment
    Args:
        reddit_obj_id (int|str): The id of a reddit object as reported by PRAW
    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"c_{reddit_obj_id}"
|
def numel_from_size(size):
    """Multiplies all the dimensions in a torch.Size object.

    Args:
        size: an iterable of ints (e.g. torch.Size).

    Returns:
        int: product of all dimensions (1 for an empty size).
    """
    import math
    # IDIOM: math.prod (Python 3.8+) replaces the hand-rolled running
    # product; the start value is 1, matching the old accumulator.
    return math.prod(size)
|
def _istradiationalfloat(value):
"""
Checks if the string can be converted to a floating point value
Does not allow for fortran style floats, i.e -2.34321-308
only standard floats.
"""
try:
float(value)
return True
except ValueError:
return False
|
def top_files(query, files, idfs, n):
    """
    Given a `query` (a set of words), `files` (a dictionary mapping names of
    files to a list of their words), and `idfs` (a dictionary mapping words
    to their IDF values), return a list of the filenames of the `n` top
    files that match the query, ranked according to tf-idf.
    """
    scored = []
    for filename, words in files.items():
        # TF-IDF of the file against the query terms it contains.
        score = sum(idfs[term] * words.count(term) for term in query if term in words)
        if score == 0:
            # No query term matched: leave the file out of the ranking.
            continue
        scored.append((filename, score))
    # Ascending sort; no reverse needed since all scores are negative.
    scored.sort(key=lambda pair: pair[1])
    return [filename for filename, _ in scored[:n]]
|
def lines_coincide(begin1, end1, begin2, end2):
    """ Returns true if the line segments intersect. """
    # Represent each segment's carrying line as A*x + B*y + C = 0.
    A1 = end1[1] - begin1[1]
    B1 = -end1[0] + begin1[0]
    C1 = - (begin1[1] * B1 + begin1[0] * A1)
    A2 = end2[1] - begin2[1]
    B2 = -end2[0] + begin2[0]
    C2 = - (begin2[1] * B2 + begin2[0] * A2)
    # Degenerate (zero-length) segments never coincide.
    if (not(A1 or B1)) or (not(A2 or B2)):
        return False
    # Lines coincide when the coefficient triples (A, B, C) are proportional;
    # then all three 2x2 cross-determinants vanish (within 1e-10 tolerance).
    # NOTE(review): this tests that the *infinite lines* coincide, not that
    # the finite segments overlap — confirm that is what callers expect.
    return abs(A1 * B2 - A2 * B1) < 1E-10 and abs(C1 * A2 - C2 * A1) < 1E-10 and abs(C1 * B2 - C2 * B1) < 1E-10
|
def is_cspm_view(view):
    """ True if the view has a cspm file loaded"""
    if view is None:
        return False
    if view.file_name() is None:
        return False
    syntax_name = view.settings().get('syntax')
    return 'cspm' in syntax_name.lower()
|
def fetch_base_path(dir_path: str) -> str:
    """Module to fetch base path of a given path.
    Parameters
    ----------
    dir_path*: string variable representing the actual path variable (absolute/relative)
    (* - Required parameters)
    Returns
    -------
    base path as string: everything up to and including the last '/',
    or the input unchanged when it contains no '/'.
    """
    head, sep, _ = dir_path.rpartition('/')
    if sep:
        return head + sep
    return dir_path
|
def bisect(f, target, interval, args=()):
    """Finds target intersection with f within an integer interval using the
    bisect method. Returns (position, iterations); when no exact hit is
    found, position is the last index whose f-value is below target."""
    lo, hi = interval
    steps = 0
    while lo <= hi:
        steps += 1
        midpoint = lo + (hi - lo) // 2
        value = f(midpoint, *args)
        if value == target:
            return (midpoint, steps)
        if value < target:
            lo = midpoint + 1
        else:
            hi = midpoint - 1
    return lo - 1, steps
|
def cat(input_files, output_file):
    """Just concatenates files with the ``cat`` command"""
    command = "cat %s > %s" % (" ".join(input_files), output_file)
    return {
        "name": "cat: " + output_file,
        "actions": [command],
        "file_dep": input_files,
        "targets": [output_file],
    }
|
def _select_cols(a_dict, keys):
"""Filters out entries in a dictionary that have a key which is not part of 'keys' argument. `a_dict` is not
modified and a new dictionary is returned."""
if keys == list(a_dict.keys()):
return a_dict
else:
return {field_name: a_dict[field_name] for field_name in keys}
|
def get_picard_max_records_string(max_records: str) -> str:
    """Get the max records string for Picard.
    Create the 'MAX_RECORDS_IN_RAM' parameter using `max_records`. If
    `max_records` is None or the empty string, an empty string is returned.
    """
    if max_records is None or max_records == "":
        return ""
    return " MAX_RECORDS_IN_RAM=%d" % int(max_records)
|
def split_schema_obj(obj, sch=None):
    """Return a (schema, object) tuple given a possibly schema-qualified name

    :param obj: object name or schema.object
    :param sch: schema name (defaults to 'public')
    :return: tuple (schema, unquoted object name)
    """
    # IDIOM FIX: `sch == None` -> `sch is None`; also dropped the redundant
    # trailing `if sch != qualsch: sch = qualsch` dance — the result is
    # always (qualsch, obj).
    qualsch = 'public' if sch is None else sch
    if '.' in obj:
        (qualsch, obj) = obj.split('.')
    # Strip surrounding double quotes from a quoted identifier.
    if obj[:1] == '"' and obj[-1:] == '"':
        obj = obj[1:-1]
    return (qualsch, obj)
|
def getMinUnvisited(unvisited, dist):
    """
    Return the minimum-distance vertex from the set of vertices
    not yet processed.
    Parameters:
        unvisited (set): the set containing all the vertices not yet processed
        dist (dict): vertex -> total distance from the source
    """
    return min(unvisited, key=lambda vertex: dist[vertex])
|
def _ge_from_lt(self, other):
    """Return a >= b. Computed by @total_ordering from (not a < b)."""
    # Delegate to __lt__ and propagate NotImplemented unchanged so Python
    # can fall back to the reflected operation on `other`.
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return NotImplemented
    return not op_result
|
def isBoolean(s):
    """ Checks to see if this is a JSON bool (the literal strings
    'true' or 'false')."""
    return s in ('true', 'false')
|
def twoNumberSum_s1(array, targetSum):
    """
    Brute-force pair search.

    Time Complexity : O(n^2)
    Space Complexity : O(1)

    Scans every pair (i, j) with j > i, left to right, so an element is
    never added to itself; the outer index only needs to run to the
    second-to-last position. Returns the first pair whose sum equals
    targetSum, or [] when no such pair exists.
    """
    for i, first in enumerate(array[:-1]):
        for second in array[i + 1:]:
            if first + second == targetSum:
                return first, second
    return []
|
def _FindFeaturesOverriddenByArgs(args):
"""Returns a list of the features enabled or disabled by the flags in args."""
overridden_features = []
for arg in args:
if (arg.startswith('--enable-features=')
or arg.startswith('--disable-features=')):
_, _, arg_val = arg.partition('=')
overridden_features.extend(arg_val.split(','))
return [f.split('<')[0] for f in overridden_features]
|
def WHo_mt(dist, sigma):
    """
    Speed Accuracy model for generating finger movement time.
    :param dist: euclidian distance between points.
    :param sigma: speed-accuracy trade-off variance.
    :return: mt: movement time.
    """
    # Model constants.
    x0 = 0.092
    y0 = 0.0018
    alpha = 0.6
    k_alpha = 0.12
    # CLEANUP: removed x_min / x_max, which were assigned but never used.
    # Guard against division by zero for coincident points.
    if dist == 0:
        dist = 0.0000001
    mt = pow((k_alpha * pow(((sigma - y0) / dist), (alpha - 1))), 1 / alpha) + x0
    return mt
|
def subreddit_in_grouping(subreddit: str, grouping_key: str) -> bool:
    """
    Check whether a subreddit name falls inside a grouping's alphabetical
    range (inclusive at both ends).

    :param subreddit: subreddit name
    :param grouping_key: example: "askreddit~-~blackburn"
    :return: if string is within the grouping range
    """
    bounds = grouping_key.split("~-~")
    if len(bounds) == 1:
        # NOTE(review): a malformed key (missing "~-~" separator) is only
        # logged here; the comparison below will then raise IndexError on
        # bounds[1] — confirm whether that crash is the intended behavior.
        print(subreddit, grouping_key)
    return bounds[0] <= subreddit <= bounds[1]
|
def is_colored(page, colored):
    """ O(1) !! — look up `page` inside the bucket stored under its hash."""
    bucket = colored.get(hash(page))
    return bucket is not None and page in bucket
|
def get_link_props(props):
    """Return the subset of *props* whose values contain a '.' (link props)."""
    def _is_link(value):
        return "." in value
    return list(filter(_is_link, props))
|
def insert_clause(table_name, keys):
    """ Build a parameterized SQL INSERT statement.
    Args:
        table_name: The table where the insertion will happen.
        keys: An iterable of strings naming the columns to set.
    Returns:
        The query as a string, with one '?' placeholder per column.
    """
    columns = list(keys)
    # Joining the string '???...' yields '?, ?, ?' — one slot per column.
    placeholders = ', '.join('?' * len(columns))
    return 'INSERT INTO {} ({}) VALUES ({})'.format(
        table_name, ', '.join(columns), placeholders)
|
def wc(iterable):
    """
    wc(iter: iterable)
    return size of "iter"
    args:
        iter = [[1,2], [2,3], [3,4]]     iter = {}
    return:
        3                                0
    """
    # Count by consuming the iterable; works for any iterable, not just
    # sized containers.
    return sum(1 for _ in iterable)
|
def filter_by_cusine(names_matching_price, cusine_to_names, cusine_list):
    """ (list of str, dict of {str: list of str}, list of str) -> list of str

    Return the names from names_matching_price that serve at least one
    cuisine in cusine_list, ordered cuisine-by-cuisine (a name can appear
    once per matching cuisine).

    >>> names = ['Queen St. Cafe', 'Dumplings R Us', 'Deep Fried Everything']
    >>> cuis = {'Canadian': ['Georgie Porgie'],
    ...         'Pub Food': ['Georgie Porgie', 'Deep Fried Everything'],
    ...         'Malaysian': ['Queen St. Cafe'],
    ...         'Thai': ['Queen St. Cafe'],
    ...         'Chinese': ['Dumplings R Us'],
    ...         'Mexican': ['Mexican Grill']}
    >>> cuisines = ['Chinese', 'Thai']
    >>> filter_by_cusine(names, cuis, cuisines)
    ['Dumplings R Us', 'Queen St. Cafe']
    """
    names_of_restaurants = []
    for cus in cusine_list:
        # .get avoids a KeyError when a requested cuisine is unknown.
        serving = cusine_to_names.get(cus, [])
        for nam in names_matching_price:
            if nam in serving:
                names_of_restaurants.append(nam)
    return names_of_restaurants
|
def lorentzian(x, x0, gamma, alpha):
    """alpha * gamma / ((x-x0)**2 + gamma ** 2)"""
    numerator = alpha * gamma
    denominator = (x - x0) ** 2 + gamma ** 2
    return numerator / denominator
|
def hex_to_rgb(hex):
    """
    Convert a hex colour to (r, g, b).
    Arguments:
        hex: Hexidecimal colour code, i.e. '#abc123'.
    Returns: (r, g, b) tuple with values 0 - 1000.
    """
    scalar = 3.9215  # 255 * 3.9215 ~= 1000.
    # Channel hex pairs start at offsets 1, 3, 5 (offset 0 is the '#').
    return tuple(int(int(hex[i:i + 2], 16) * scalar) for i in (1, 3, 5))
|
def matches(filename_, filenames_to_exclude_):
    """
    Return True if filename_ equals any entry of filenames_to_exclude_.
    :param filename_:
    :param filenames_to_exclude_:
    :return:
    """
    # Membership test performs the same equality scan as the original loop.
    return filename_ in filenames_to_exclude_
|
def is_vcf_call_line(line):
    """
    Returns ``True`` if ``line`` is a VCF SNP call line, ``False`` otherwise.

    In VCF, meta-information lines start with ``##`` and the column header
    line starts with ``#``; every other line is a variant call record.

    :param line: line from VCF file
    :return: ``bool``
    """
    # Bug fix: the old check (line.startswith('##contig=')) matched a
    # *header* line, contradicting the documented contract above.
    return not line.startswith('#')
|
def header_lookup(headers):
    """Build a name -> column-index lookup table, i.e.,
       var_id[patient id] = 0
       var_id[survival rate] = 1
    Args:
        headers: the name list of candidate causal variables,
                 outcome, patien id, ,etc.
    """
    # A duplicate header keeps its last position, as in a manual loop.
    return {head: idx for idx, head in enumerate(headers)}
|
def sort_score(modelScore):
    """
    param1: Dictionary
    return: Dictionary
    Function returns a sorted dictionary on the basis of values.
    """
    ranked_items = sorted(modelScore.items(), key=lambda pair: pair[1], reverse=True)
    return dict(ranked_items)
|
def relu(x, c = 0):
    """
    Compute the value of the relu function with parameter c, for a given point x.
    :param x: (float) input coordinate
    :param c: (float) shifting parameter
    :return: (float) the value of the relu function
    """
    positive_part = x if x > 0.0 else 0.0
    return c + positive_part
|
def parse_timedelta_from_api(data_dict, key_root):
    """Returns a dict with the appropriate keys for filling a two-part
    timedelta field where the fields are named <key_root>_number and
    <key_root>_units.

    Parameters
    ----------
    data_dict: dict
        API json response containing a key matching the key_root argument,
        whose value is an interval length in minutes.
    key_root: str
        The prefix used to identify the timedelta `<input>` elements. This
        should match the metadata dictionaries key for accessing the value.

    Returns
    -------
    dict
        dict with keys <key_root>_number and <key_root>_units set to the
        appropriate values.

    Raises
    ------
    TypeError
        If the interval length key is set to a non-numeric value or does
        not exist (the modulo below fails on None).
    """
    minutes = data_dict.get(key_root)
    # Prefer the coarsest unit that divides the interval evenly; the API
    # always reports in minutes, so 'minutes' is the fallback.
    if minutes % 1440 == 0:
        value, units = int(minutes / 1440), 'days'
    elif minutes % 60 == 0:
        value, units = int(minutes / 60), 'hours'
    else:
        value, units = int(minutes), 'minutes'
    return {
        f'{key_root}_number': value,
        f'{key_root}_units': units,
    }
|
def fibonacci(i):
    """
    Return the ith number (1-indexed) in the Fibonacci series, -1 for i <= 0.
    """
    if i <= 0:
        return -1
    if i <= 2:
        return 1
    # Iterate with a tuple swap instead of a temporary variable.
    prev, curr = 1, 1
    for _ in range(i - 2):
        prev, curr = curr, prev + curr
    return curr
|
def is_prime(number):
    """
    Return True if *number* is prime, False otherwise.

    >>> is_prime(3)
    True
    >>> is_prime(90)
    False
    >>> is_prime(67)
    True
    """
    from math import isqrt

    if number <= 1:
        return False
    # Trial division only needs to reach sqrt(number): any composite has a
    # factor no larger than its square root. The original scanned up to
    # number-1, which is needlessly O(n).
    for i in range(2, isqrt(number) + 1):
        if number % i == 0:
            return False
    return True
|
def is_partition(device):
    """
    Check if a device name denotes a partition (name ends in a digit).
    """
    return device[-1].isdigit()
|
def hashable_index(tuple_idx):
    """Return a hashable representation of a tuple of slice objects.
    We add this because the slice object in python is not hashable.

    Each slice is replaced by its __reduce__() form, a (type, args)
    tuple, which is hashable; all other elements pass through untouched.

    Parameters
    ----------
    tuple_idx : tuple
        A tuple of slice/int objects

    Returns
    -------
    ret : tuple
        A hashable representation of the slice data
    """
    return tuple(
        ele.__reduce__() if isinstance(ele, slice) else ele
        for ele in tuple_idx
    )
|
def get_epoch(ckpt_name):
    """Get the epoch number from a checkpoint name.

    Expects names like ``model-12_final``: the epoch is the text between
    the first ``-`` and the following ``_``.

    Raises ValueError if that text is not an integer.
    """
    start = ckpt_name.find('-')
    start += len('-')
    end = ckpt_name.find('_', start)
    # int() instead of eval(): eval on a filename fragment is unsafe and
    # also chokes on zero-padded numbers like "03".
    return int(ckpt_name[start:end].strip())
|
def replace(given_text: str, sub_string: str, replacable_str: str) -> str:
    """Replace a substring with another string from a given text.

    Args:
        given_text (str): the full text where to be replaced
        sub_string (str): the literal substring to be replaced
        replacable_str (str): the new replaced string

    Returns:
        str: stripped replaced text
    """
    # Bug fix: re.sub treated sub_string as a regex, so metacharacters
    # like '.' or '*' matched unintended text. str.replace is literal,
    # which is what the docstring promises.
    return given_text.replace(sub_string, replacable_str).strip()
|
def hex_to_rgb(color):
    """Convert hex color format to rgb color format.
    Args:
        color (int): The hex representation of color.
    Returns:
        The rgb representation of color, as floats in [0, 1].
    """
    # Extract the red, green and blue bytes by shifting, then normalize.
    return tuple(((color >> shift) & 0xff) / 255 for shift in (16, 8, 0))
|
def tes2numpy(msb_type, num_bytes, nelems=1):
    """
    Converts a MSB (big-endian) data type to a numpy datatype.

    Parameters
    ----------
    msb_type : str
        One of 'MSB_UNSIGNED_INTEGER', 'MSB_INTEGER', 'IEEE_REAL',
        'CHARACTER' or 'MSB_BIT_STRING'.
    num_bytes : int
        Size of a single element in bytes.
    nelems : int
        Number of elements; values > 1 produce a structured field list.

    Returns
    -------
    str or list
        A numpy dtype string (e.g. '>u4'), or a list of (name, dtype)
        tuples for bit strings and multi-element fields.

    Raises
    ------
    ValueError
        If num_bytes is invalid for msb_type (single-element case only).
    """
    valid_bytes = {
        'MSB_UNSIGNED_INTEGER': [1,2,4,8,16,32,64],
        'MSB_INTEGER': [1,2,4,8,16,32,64],
        'IEEE_REAL': [1,2,4,8,16,32,64],
        'CHARACTER': range(1,128),
        'MSB_BIT_STRING': range(1,128)
    }
    # Bit strings are exposed byte-by-byte as unsigned big-endian bytes.
    msb_bit_string_type = [('byte{}'.format(i), '>u1') for i in range(num_bytes)]
    dtype_map = {
        'MSB_UNSIGNED_INTEGER': '>u{}'.format(num_bytes),
        'MSB_INTEGER': '>i{}'.format(num_bytes),
        'IEEE_REAL': '>f{}'.format(num_bytes),
        'CHARACTER': 'a{}'.format(num_bytes),
        'MSB_BIT_STRING': msb_bit_string_type
    }
    if num_bytes not in valid_bytes[msb_type] and nelems == 1:
        # ValueError is a subclass of Exception, so existing
        # `except Exception` callers keep working.
        raise ValueError('invalid byte ({}) count for type ({})'.format(num_bytes, msb_type))
    if nelems > 1:
        # Must be an array
        return [('elem{}'.format(i), dtype_map[msb_type]) for i in range(nelems)]
    return dtype_map[msb_type]
|
def match_hex(key: str) -> str:
    """Returns a string containing a regex matching key=<hex_or_integer_string>"""
    value_pattern = "((?:0x)?[0-9a-fA-F]+)"
    return key + "=" + value_pattern
|
def in_SCAT_box(x, y, low_bound, high_bound, x_max, y_max):
    """Determine whether the point (x, y) falls inside the SCAT box:
    0..x_max horizontally, 0..y_max vertically, and between the
    low_bound/high_bound curves evaluated at x."""
    # Both bound curves are evaluated up front, as the original did.
    upper_limit = high_bound(x)
    lower_limit = low_bound(x)
    out_of_box = (x > x_max or y > y_max
                  or x < 0 or y < 0
                  or y > upper_limit or y < lower_limit)
    return not out_of_box
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.