content stringlengths 42 6.51k |
|---|
def ensure_unique_app_labels(installed_apps):
    """Return the labels from ``installed_apps`` as a tuple, dropping
    duplicates while preserving first-seen order."""
    unique = []
    for label in installed_apps:
        if label not in unique:
            unique.append(label)
    return tuple(unique)
def convert_to_cplus_type(json_type):
    """
    convert the json type to c++ type
    :param json_type: the json type
    :return: c++ type.
    """
    mapping = {
        "boolean": "bool",
        "number": "double",
        "integer": "int",  # uint8_t ?
        "string": "std::string",
        "object": "OCRepresentation",
    }
    return mapping.get(json_type, "void*")
def widget_type(field):
    """
    (stolen from django-widget-tweaks) Returns field widget class name (in lower case).
    """
    widget = getattr(getattr(field, 'field', None), 'widget', None)
    if not widget:
        return ''
    name = type(widget).__name__.lower()
    if name == "groupedchoicewidget":
        name = widget.widget_name
    return name
def convert_to_float(number):
    """
    Convert ``number`` to a float, returning None when conversion fails.

    :param number: any value accepted by ``float`` (str, int, float, ...)
    :return: the float value, or None for unconvertible input
    """
    try:
        return float(number)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except`` so unrelated errors
        # (e.g. KeyboardInterrupt) are no longer swallowed.
        return None
def returnCoords(coordString: str) -> tuple:
    """
    Converts Human-Readable Coordinates into 2D list index
    coordString - The user-input string, such as "A3" or "C2". This is not case-sensetive.
    """
    column_lookup = {"a": 0, "b": 1, "c": 2}
    column = column_lookup[coordString[0].lower()]
    row = int(coordString[1]) - 1
    return (row, column)
def personString(person):
    """ Serialize a person dict as '#'-joined 'field:value' pairs.
    e.g. mr#bryce#d#mecum#mecum@nceas.ucsb.edu
    """
    return "#".join("%s:%s" % (field, value) for field, value in person.items())
def _findString(text, s, i):
    """Helper function of findString, which is called recursively
    until a match is found, or it is clear there is no match.

    Searches ``text`` for substring ``s`` starting at index ``i`` and
    rejects matches that fall inside a quoted string or after a ``#``
    comment marker on their line.  Returns the match index on success,
    -1 when there is no further occurrence, or ``-i2`` (the negated
    match position) when the occurrence at ``i2`` was inside a string
    or comment, telling the caller where to resume scanning.
    """
    # Find occurrence
    i2 = text.find(s, i)
    if i2 < 0:
        return -1
    # Find newline (if none, we're done)
    i1 = text.rfind("\n", 0, i2)
    if i1 < 0:
        return i2
    # Extract the part on the line up to the match
    line = text[i1:i2]
    # Count quotes, we're done if we found none
    if not line.count('"') and not line.count("'") and not line.count("#"):
        return i2
    # So we found quotes, now really count them ...
    prev = ""  # previous character, used to detect backslash escapes
    inString = "" # this is a boolean combined with a flag which quote was used
    isComment = False
    for c in line:
        if c == "#":
            if not inString:
                isComment = True
                break
        elif c in "\"'":
            if not inString:
                inString = c  # enter a string; remember which quote opened it
            elif prev != "\\":  # ignore escaped quotes
                if inString == c:
                    inString = "" # exit string
                else:
                    pass # the other quote can savely be used inside this string
        prev = c
    # If we are in a string, this match is false ...
    if inString or isComment:
        return -i2 # indicate failure and where to continue
    else:
        return i2 |
def selection_sort(array):
    """
    Sort ``array`` in place using selection sort and return it.

    Repeatedly moves the minimum of the unsorted tail to its front.
    :param array: a list of comparable items
    :return: the same list, sorted ascending
    """
    n = len(array)
    for start in range(n):
        smallest = start
        for candidate in range(start + 1, n):
            if array[candidate] < array[smallest]:
                smallest = candidate
        if smallest != start:
            array[start], array[smallest] = array[smallest], array[start]
    return array
def map_init(interface, params):
    """Seed Python's and NumPy's RNGs from ``params['seed']`` and return
    ``params`` unchanged (``interface`` is unused)."""
    import random
    import numpy as np
    seed = params['seed']
    random.seed(seed)
    np.random.seed(seed)
    return params
def rsa_crt_dmp1(private_exponent: int, p: int) -> int:
    """Return dP, the RSA-CRT exponent: ``private_exponent mod (p - 1)``."""
    return private_exponent % (p - 1)
def has_one_of_attributes(node, *args):
    """
    Check whether one of the listed attributes is present on a (DOM) node.
    @param node: DOM element node
    @param args: possible attribute names, or a single tuple/list of names
    @return: True or False (None when no attribute names were given)
    @rtype: Boolean
    """
    if not args:
        return None
    candidates = args[0] if isinstance(args[0], (tuple, list)) else args
    return any(node.hasAttribute(attr) for attr in candidates)
def check_infos(data, infos, required_infos=None):
    """Implement infos verification logic.

    ``None``/``False`` -> return data only; ``True`` -> return (data, infos);
    a tuple/list -> assert element-wise equality with ``infos``.
    """
    if required_infos is None or required_infos is False:
        return data
    if required_infos is True:
        return data, infos
    assert isinstance(required_infos, (tuple, list))
    for expected, got in zip(required_infos, infos):
        assert expected == got, (expected, got)
    return data
def filetostr(filename):
    """
    Return the text contents of ``filename``, or None if it cannot be read.
    """
    try:
        with open(filename, "r") as stream:
            return stream.read()
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare ``except``: only I/O and decode failures
        # map to the best-effort None result.
        return None
def getDASDV(rawEMGSignal):
    """ Get the standard deviation value of the the wavelength.::
        DASDV = sqrt( (1 / (N-1)) * sum((x[i+1] - x[i])**2 ) for i = 1 --> N - 1
        * Input:
            * raw EMG Signal
        * Output:
            * DASDV
        :param rawEMGSignal: the raw EMG signal
        :type rawEMGSignal: list
        :return: standard deviation value of the the wavelength
        :rtype: float
    """
    N = len(rawEMGSignal)
    squared_diffs = [(rawEMGSignal[i + 1] - rawEMGSignal[i]) ** 2
                     for i in range(N - 1)]
    # The docstring's DASDV formula includes the square root, but the
    # original code omitted it; apply it so the returned value matches
    # the documented (standard) definition.
    DASDV = ((1 / (N - 1)) * sum(squared_diffs)) ** 0.5
    return DASDV
def positions(width):
    """Helper function returning a list describing the bit positions.
    Bit positions greater than 99 are truncated to 2 digits, for example,
    100 -> 00 and 127 -> 27."""
    return ['{0:2}'.format(bit)[-2:] for bit in range(width - 1, -1, -1)]
def upgrade_link(link: str) -> str:
    """
    The permalink returned by praw used to have no trailing slash.
    We use this old version in the json file; add one here to keep it
    consistent with new poems.
    """
    if link and not link.endswith("/"):
        return link + "/"
    return link
def create_mapping(dico):
    """
    Create a mapping (item to ID / ID to item) from a dictionary.
    Items are ordered by decreasing frequency (ties broken by item).
    """
    ranked = sorted(dico.items(), key=lambda kv: (-kv[1], kv[0]))
    id_to_item = dict(enumerate(item for item, _freq in ranked))
    item_to_id = {item: idx for idx, item in id_to_item.items()}
    return item_to_id, id_to_item
def _get_distance(x1, y1, x2, y2):
"""
:return: distance between points
"""
return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5 |
def merge(L, R):
    """Merge two ascending-sorted lists into a new sorted list.

    Runs in O(len(L) + len(R)) and leaves both inputs unmodified; the
    original popped from the fronts of the lists, which is O(n^2) and
    destructively emptied the callers' lists.  On ties the element from
    R is taken first, matching the original ``L[0] >= R[0]`` comparison.
    """
    i = j = 0
    merged = []
    while i < len(L) and j < len(R):
        if L[i] >= R[j]:
            merged.append(R[j])
            j += 1
        else:
            merged.append(L[i])
            i += 1
    return merged + L[i:] + R[j:]
def get_parts(line):
    """
    Split each rule line into its constituent parts
    >>> get_parts('1-3 a: abcde')
    ('1-3 a', 'abcde')
    >>> get_parts('1-3 b: cdefg')
    ('1-3 b', 'cdefg')
    >>> get_parts('2-9 c: ccccccccc')
    ('2-9 c', 'ccccccccc')
    :param line:
    :return:
    """
    rule, password = line.split(":", 1)
    return rule.strip(), password.strip()
def bus(informed_entity):
    """Decodes informed entity objects for buses and returns a human-readable route name
    Args:
        informed_entity (dict): The informed entity object returned by the API for a bus route
    Returns:
        str: The route ID (night buses get an 'N' prefix, express routes an 'e' suffix)
    """
    route_id = informed_entity["route_id"]
    # Night buses are encoded as 991X / 99XX four-digit IDs.
    if len(route_id) == 4:
        if route_id.startswith("991"):
            return "N" + route_id[-1]
        if route_id.startswith("99"):
            return "N" + route_id[2:]
    # Otherwise the trailing digit flags express ('1') service.
    base, flag = route_id[:-1], route_id[-1]
    return base + "e" if flag == "1" else base
def swap_coordinates(geo, idx1, idx2):
    """ Swap the order of the coordinates of two atoms in a molecular geometry.
        :param geo: molecular geometry
        :type geo: automol molecular geometry data structure
        :param idx1: index for one atom to swap coordinates
        :type idx1: int
        :param idx2: index for one atom to swap coordinates
        :type idx2: int
        :rtype: molecular geometry
    """
    rows = [list(row) for row in geo]
    rows[idx1], rows[idx2] = rows[idx2], rows[idx1]
    return tuple(tuple(row) for row in rows)
def get_video_daytime(file_name):
    """Extracts daytime when video was recorded from filename
    Arguments:
        file_name: The name of the file
    returns (hour, minutes) parsed from the characters immediately before
    the 'FrontColor'/'BackColor' marker; raises FileNotFoundError when
    neither marker is present.
    """
    for marker in ("FrontColor", "BackColor"):
        anchor = file_name.find(marker)
        if anchor != -1:
            break
    else:
        raise FileNotFoundError("Not a valid file")
    hour = int(file_name[anchor - 8:anchor - 6])
    minutes = int(file_name[anchor - 5:anchor - 3])
    return hour, minutes
def generalized_lorentzian(x, p):
    """
    Generalized Lorentzian function.
    Parameters
    ----------
    x: numpy.ndarray
        non-zero frequencies
    p: iterable
        p[0] = peak central frequency, p[1] = FWHM of the peak (gamma),
        p[2] = peak value at x=x0, p[3] = power coefficient [n], must be > 0
    Returns
    -------
    model: numpy.ndarray
        generalized lorentzian psd model
    """
    assert p[3] > 0., "The power coefficient should be greater than zero."
    x0, gamma, peak, n = p[0], p[1], p[2], p[3]
    half_width_pow = (gamma / 2) ** n
    return peak * half_width_pow / (abs(x - x0) ** n + half_width_pow)
def hex_nring(npos):
    """Return the number of rings in a hexagonal layout.
    For a hexagonal layout with a given number of positions, return the
    number of rings.
    Args:
        npos (int): The number of positions.
    Returns:
        (int): The number of rings.
    Raises:
        RuntimeError: if npos is not 1 + 6 + 12 + ... (a valid layout size).
    """
    remaining = npos - 1
    ring = 1
    # Each ring k beyond the centre holds 6*k positions.
    while remaining >= 6 * ring:
        remaining -= 6 * ring
        ring += 1
    if remaining != 0:
        raise RuntimeError("{} is not a valid number of positions for a "
                           "hexagonal layout".format(npos))
    return ring
def check_tag(textline, tag1, tag2):
    """ helper function
    return (found tag [or None], is tab-separated [or None])
    """
    stripped = textline.strip()
    # Space-separated tags are checked first, then tab-separated ones,
    # matching the original precedence.
    for separator, tabbed in ((" ", False), ("\t", True)):
        for tag in (tag1, tag2):
            if stripped.startswith(tag + separator):
                return tag, tabbed
    return None, None
def dec_2_bx(VALUE, ALPHABET, BASE, EXCEL_MODE):
    """
    Convert from decimal to base X.

    :param VALUE: non-negative integer to convert
    :param ALPHABET: sequence of digit characters indexed by digit value
    :param EXCEL_MODE: when truthy, the leading digit of a multi-digit
        result is shifted down by one index (apparently Excel-style
        column naming where there is no leading zero digit — confirm
        with callers)
    :return: string representation of VALUE in base BASE
    """
    if VALUE == 0:
        return ALPHABET[0]
    # Count how many digit positions the result needs.
    AMOUNT_PLACES = 0
    while pow(BASE, AMOUNT_PLACES) <= VALUE:
        AMOUNT_PLACES += 1
    RETURN_VALUE = ""
    # Greedily pick the largest digit for each position, most significant
    # first, subtracting its contribution from VALUE as we go.
    for i in range(AMOUNT_PLACES - 1, -1, -1):
        for j in range(BASE, -1, -1):
            if pow(BASE, i) * j <= VALUE:
                if EXCEL_MODE and i == AMOUNT_PLACES - 1 and AMOUNT_PLACES > 1:
                    # Leading digit is 1-based in Excel mode, so shift by one.
                    RETURN_VALUE = f"{RETURN_VALUE}{ALPHABET[j - 1]}"
                else:
                    RETURN_VALUE = f"{RETURN_VALUE}{ALPHABET[j]}"
                VALUE = VALUE - pow(BASE, i) * j
                break
    return RETURN_VALUE |
def collapse_topology(topology):
    """
    Collapse a topology into a compact list of (domain, run_length)
    pairs, one per run of consecutive identical domains.
    """
    # Work on a copy so the caller's sequence is untouched.
    remaining = list(topology)
    compact = []
    current = remaining.pop(0)
    run = 1
    for nxt in remaining:
        if nxt is current:  # identity comparison, as in the original
            run += 1
        else:
            compact.append((current, run))
            current = nxt
            run = 1
    compact.append((current, run))
    return compact
def PO2_Calc(KO2, tO2, Kr, I, qO2):
    """
    Calculate PO2.
    :param KO2: oxygen valve constant [kmol.s^(-1).atm^(-1)]
    :param tO2: oxygen time constant [s]
    :param Kr: modeling constant [kmol.s^(-1).A^(-1)]
    :param I: cell load current [A]
    :param qO2: molar flow of oxygen [kmol.s^(-1)]
    :return: PO2 [atm] as float, or None (with an error message printed)
        when inputs are invalid
    """
    try:
        gain = (1 / KO2) / (1 + tO2)
        return gain * (qO2 - Kr * I)
    except (TypeError, ZeroDivisionError):
        print(
            "[Error] PO2 Calculation Failed (KO2:%s, tO2:%s, Kr:%s, I:%s, qO2:%s)" %
            (str(KO2), str(tO2), str(Kr), str(I), str(qO2)))
def round_down_to_power_of_two(n):
    """Returns the nearest power-of-two less than or equal to n,
    or -1 when n < 1 (no such power exists)."""
    # Include i == 0 so n == 1 correctly yields 1; the original range
    # stopped at i == 1 and wrongly returned -1 for n == 1.
    for i in range(30, -1, -1):
        p = 1 << i
        if p <= n:
            return p
    return -1
def idle_time(boutlist, idle_threshold=15):
    """Takes list of times of bouts in seconds, returns idle time in seconds,
    i.e. time spent without bout for longer than idle_threshold in seconds.
    """
    total = 0
    # Iterate over ALL consecutive pairs; the original looped to
    # len(boutlist) - 2 and silently dropped the final inter-bout interval.
    for earlier, later in zip(boutlist, boutlist[1:]):
        gap = later - earlier
        if gap > idle_threshold:
            total += gap
    return total
def alias_phased_obs_with_phase(x, y, start, end):
    """
    Tile phased observations so they cover the phase range [start, end].

    :param x: a list containing phases
    :param y: a list containing observations
    :param start: start phase
    :param end: end phase
    :return: aliased phases and observations (only points inside
        [start, end] are kept)
    :raises ValueError: if start > end or if x and y differ in length
    """
    x = [float(n) for n in x]
    y = [float(n) for n in y]
    if start > end:
        raise ValueError("Start phase can't be larger than stop phase.")
    if len(x) != len(y):
        raise ValueError("x and y must be the same size.")
    # Integer offset that shifts the phases so they begin near `start`.
    distance = int(start - min(x))
    if (distance == 0 and min(x) > start) or (distance < 0 < min(x)):
        # Step one full cycle further back so the shifted phases do not
        # begin after `start`.
        distance = distance - 1
    x = [phase + distance for phase in x]
    new_x = x[:]
    new_y = y[:]
    # Repeat the cycle, shifted by +i each time, until the tiled phases
    # reach `end`.
    i = 1
    while max(new_x) < end:
        x_temp = [phase + i for phase in x]
        new_x = new_x + x_temp
        new_y = new_y + y[:]
        i = i + 1
    # Keep only the points that fall inside [start, end].
    _x = []
    _y = []
    for phase, value in zip(new_x, new_y):
        if start <= phase <= end:
            _x.append(phase)
            _y.append(value)
    return _x, _y |
def transform_seed_objects(objects):
    """Map seed objects to state format, keyed by instance_id."""
    state = {}
    for obj in objects:
        state[obj['instance_id']] = {
            'initial_player_number': obj['player_number'],
            'initial_object_id': obj['object_id'],
            'initial_class_id': obj['class_id'],
            'created': 0,
            'created_x': obj['x'],
            'created_y': obj['y'],
            'destroyed': None,
            'destroyed_by_instance_id': None,
            'destroyed_building_percent': None,
            'deleted': False,
            'destroyed_x': None,
            'destroyed_y': None,
            'building_started': None,
            'building_completed': None,
            'total_idle_time': None,
        }
    return state
def is_float(element):
    """
    Return True when ``element`` can be converted to a float, else False.
    """
    try:
        float(element)
    except (ValueError, TypeError):
        return False
    return True
def get_prof_diff(t, e):
    """
    Calculate the difference ``t - float(e)`` between profiles.

    Returns None when the difference cannot be computed — e.g. ``e`` is
    None or a non-numeric string.  (The original caught only TypeError,
    so a string like 'n/a' raised ValueError instead of returning None.)
    """
    try:
        return t - float(e)
    except (TypeError, ValueError):
        return None
def make_binary_tree(fn, args, **kwargs):
    """Takes a function/class that takes two positional arguments and a list of
    arguments and returns a binary tree of results/instances.
    >>> make_binary_tree(UnionMatcher, [matcher1, matcher2, matcher3])
    UnionMatcher(matcher1, UnionMatcher(matcher2, matcher3))
    Any keyword arguments given to this function are passed to the class
    initializer.
    """
    if not args:
        raise ValueError("Called make_binary_tree with empty list")
    if len(args) == 1:
        return args[0]
    mid = len(args) // 2
    left = make_binary_tree(fn, args[:mid], **kwargs)
    right = make_binary_tree(fn, args[mid:], **kwargs)
    return fn(left, right, **kwargs)
def is_free(amount):
    """
    Explicit zero amounts are interpreted as Free!
    (numeric 0/0.0 or the strings '0' / '0.00')
    """
    return amount in (0, '0', '0.00')
def quaternion_solution_check(qn1, qn):
    """
    Summary: Returns the most likely quaternion solution between the positive
    and negated rotation, by minimizing the L2 distance to the previous
    timestep's quaternion.
    Parameters:
        * qn1 is the previous timestep quaternion [q0, q1, q2, q3]
        * qn is the current timestep quaternion [q0, q1, q2, q3]
    """
    negated = [-component for component in qn]
    # Squared L2 distance works since the update rate is high.
    dist_pos = sum((prev - cur) ** 2 for prev, cur in zip(qn1, qn))
    dist_neg = sum((prev, cur)[0] - cur if False else (prev - cur) ** 2
                   for prev, cur in zip(qn1, negated))
    return negated if dist_neg < dist_pos else qn
def get_property_by_name(exchange, name):
    """Get property object with name ``name`` from exchange ``exchange``.
    Returns an empty dictionary if the property named ``name`` is not found."""
    return next(
        (prop for prop in exchange.get("properties", []) if prop['name'] == name),
        {},
    )
def merge(dict1, dict2):
    """
    Returns a new dictionary merging (joining keys) dict1
    and dict2.
    If a key appears in only one of dict1 or dict2, the
    value is the value from that dictionary. If it is in
    both, the value is the sum of values.
    Example: merge({'a':1,'b':2},{'b':3,'c':4}) returns
    {'a':1,'b':5,'c':4}
    Parameter dict1: The first dictionary to merge
    Precondition: dict1 a dictionary with int or float values
    Parameter dict2: The second dictionary to merge
    Precondition: dict2 a dictionary with int or float values
    """
    result = dict(dict1)  # Makes a (shallow) copy
    for k in dict2:
        if k in dict1:
            # Sum the two values — the original mistakenly added the
            # constant 1 instead of dict2's value.
            result[k] = result[k] + dict2[k]
        else:
            result[k] = dict2[k]
    return result
def ref_mode(mode):
    """
    Defines reference pixels for different imaging modes.
    Parameters:
        mode - string containing imaging mode.
    Returns:
        xref, yref - Floating point reference pixel coordinates
    """
    # Ordered substring lookup: "SLITLESS" must be tested before "SLIT".
    lookup = [
        ("SLITLESS", (37.5, 300.)),
        ("SLIT", (325.13, 299.7)),
        ("BRIGHTSKY", (711.5, 305.5)),
        ("256", (539.5, 177.5)),
        ("128", (69.5, 951.5)),
        ("64", (37.5, 809.5)),
    ]
    for token, coords in lookup:
        if token in mode:
            return coords
    # Default full-frame reference pixel.
    return 692.5, 511.5
def clean_line(line):
    """
    Remove a leading \ufeff\r BOM sequence, turn tabs into spaces, and
    strip \n, \r and the punctuation characters ()'",.* from the line.
    """
    table = str.maketrans({'\t': ' ', '\n': '', '\r': '',
                           '(': '', ')': '', "'": '', '"': '',
                           ',': '', '.': '', '*': ''})
    return line.replace('\ufeff\r', '').translate(table)
def findKey(dict_, search):
    """Find a key in a dictionary. Uses '#text' format to help with
    the xml dictionaries.
    Args:
        dict_: Haystack to search for.
        search: Needle; substring of the key to search for.
    Returns:
        Value of the first matching key (its '#text' entry when present),
        or None when nothing matches.
    """
    for key, value in dict_.items():
        if search in key:
            if '#text' in value:
                return value['#text']
            return value
    return None
def form_results_name(code, llx, lly):
    """
    Form valid dataset name using OpenTopography bulk raster naming convention
    for EarthScope data.
    """
    return f"{code}{llx}_{lly}_results.tif"
def _build_udf_resources(resources):
"""
:type resources: sequence of :class:`UDFResource`
:param resources: fields to be appended.
:rtype: mapping
:returns: a mapping describing userDefinedFunctionResources for the query.
"""
udfs = []
for resource in resources:
udf = {resource.udf_type: resource.value}
udfs.append(udf)
return udfs |
def merge_dictionaries(dic1, dic2):
    """Merge two dictionaries, summing the values of keys present in both.
    :param dic1 - dictionary (numeric values)
    :param dic2 - dictionary (numeric values)
    :return dictionary"""
    merged = dict(dic1)
    for key, value in dic2.items():
        merged[key] = merged[key] + value if key in dic1 else value
    return merged
def AmOppCreditParts(exact, e87521, num, c00100, CR_AmOppRefundable_hc,
                     CR_AmOppNonRefundable_hc, c10960, c87668):
    """
    Applies a phaseout to the Form 8863, line 1, American Opportunity Credit
    amount, e87521, and then applies the 0.4 refundable rate.
    Logic corresponds to Form 8863, Part I.
    Notes
    -----
    Tax Law Parameters that are not parameterized:
        90000 : American Opportunity Credit phaseout income base
        10000 : American Opportunity Credit phaseout income range length
        1/1000 : American Opportunity Credit phaseout rate
        0.4 : American Opportunity Credit refundable rate
    Parameters
    ----------
    exact : whether or not to do rounding of phaseout fraction
    e87521 : total tentative American Opportunity Credit for all students,
             Form 8863, line 1
    num : number of people filing jointly
    c00100 : AGI
    CR_AmOppRefundable_hc: haircut for the refundable portion of the
                           American Opportunity Credit
    CR_AmOppNonRefundable_hc: haircut for the nonrefundable portion of the
                              American Opportunity Credit
    c10960, c87668 : ignored on input; recomputed and returned
    Returns
    -------
    c10960 : Refundable part of American Opportunity Credit
    c87668 : Tentative nonrefundable part of American Opportunity Credit
    """
    if e87521 > 0.:
        # Phaseout numerator: how far AGI sits below the 90k-per-person base.
        c87658 = max(0., 90000. * num - c00100)
        # Phaseout denominator: 10k-per-person income range.
        c87660 = 10000. * num
        if exact == 1:  # exact calculation as on tax forms
            c87662 = 1000. * min(1., round(c87658 / c87660, 3))
        else:
            c87662 = 1000. * min(1., c87658 / c87660)
        # Phased-out credit amount (c87662/1000 is the phaseout fraction).
        c87664 = c87662 * e87521 / 1000.
        # 40% of the phased-out credit is refundable, less any haircut.
        c10960 = 0.4 * c87664 * (1. - CR_AmOppRefundable_hc)
        # Remainder is the nonrefundable part, less its haircut.
        c87668 = c87664 - c10960 * (1. - CR_AmOppNonRefundable_hc)
    else:
        c10960 = 0.
        c87668 = 0.
    return (c10960, c87668) |
def parse_first_smartystreets_result(result: list) -> dict:
    """
    Given an address *result* from SmartyStreets geocoding API, parse the
    canonicalized address and lat/lng information of the first result
    to return as a dict.
    If address is invalid and response is empty, then returns a dict with
    empty string values
    """
    if not result:
        return {'canonicalized_address': '', 'lat': '', 'lng': ''}
    candidate = result[0]
    address = f"{candidate.delivery_line_1} {candidate.last_line}"
    return {
        'canonicalized_address': address,
        'lat': candidate.metadata.latitude,
        'lng': candidate.metadata.longitude,
    }
def isPrompt(line, prompt_list):
    """
    Determine if the target string contains any of the given prompts.
    """
    return any(prompt in line for prompt in prompt_list)
def dot(vector1, vector2):
    """
    :return: The dot (or scalar) product of the two 2D vectors
    """
    x1, y1 = vector1[0], vector1[1]
    x2, y2 = vector2[0], vector2[1]
    return x1 * x2 + y1 * y2
def are_floats(items):
    """
    Return True when every non-empty, non-None item parses as a float.
    (None and zero-length items are skipped rather than rejected.)
    """
    for candidate in items:
        try:
            if candidate is not None and len(candidate) > 0:
                float(candidate)
        except ValueError:
            return False
    return True
def aggregate_ontology(entries):
    """ Group and count GO terms across all entries, weighting each term
    by its entry's count.  Entries with no ontology (or a blank first
    term) are skipped. """
    counts = {}
    for record in entries.values():
        terms = record.ontology
        if not terms or terms[0].rstrip() == "":
            continue
        for term in terms:
            counts[term] = counts.get(term, 0) + record.count
    return counts
def fold(dots, axis, line):
    """Return the dots folded along line, given axis x or y."""
    if axis == 'x':
        reflect = lambda x, y: (line - abs(line - x), y)
    else:
        reflect = lambda x, y: (x, line - abs(line - y))
    return {reflect(x, y) for (x, y) in dots}
def geodesic_ify(splitting_name, splitting_string, n_geodesic_steps=1):
    """Replace every appearance of R with n_geodesic_steps space-separated Rs,
    and tag the splitting name with the step count."""
    expanded_r = " ".join(["R"] * n_geodesic_steps)
    return (
        f"{splitting_name} ({n_geodesic_steps})",
        splitting_string.replace("R", expanded_r),
    )
def is_dash_option(string):
    """Whether that string looks like an option
    >>> is_dash_option('-p')
    True
    """
    # startswith also handles the empty string (the original indexed
    # string[0] and raised IndexError on "").
    return string.startswith('-')
def ParseFloat(num_str):
    """Parse number string to float, returning None on failure."""
    try:
        result = float(num_str)
    except (ValueError, TypeError):
        result = None
    return result
def median(v):
    """finds the 'middle-most' value of v"""
    ordered = sorted(v)
    mid = len(v) // 2
    if len(v) % 2:
        # odd count: the middle element
        return ordered[mid]
    # even count: the average of the two middle elements
    return (ordered[mid - 1] + ordered[mid]) / 2
def iterativeFactorial(num):
    """assumes num is a positive int
    returns an int, num! (the factorial of num)
    """
    result = 1
    for k in range(2, num + 1):
        result *= k
    return result
def match_lists(list1, list2):
    """Return the number of distinct items common to both lists."""
    return len(set(list1) & set(list2))
def dynamic_path(input_str):
    """Normalise dynamic paths.

    Currently an identity function (returns ``input_str`` unchanged);
    kept as an extension point for future path normalisation.
    """
    # Not needed at the moment
    return input_str |
def image_topic_basename(topic):
    """ A convenience method for stripping the endings off an image topic.
    Returns the topic with the known suffix removed, or None when the
    topic carries none of the known endings."""
    for suffix in ('compressed', 'encoding', 'image_raw'):
        if topic.endswith(suffix):
            return topic[:-len(suffix)]
    return None
def check_key(dictionary, key, default_value):
    """
    Returns the value assigned to the 'key' in the ini file.
    Parameters:
        dictionary : [dict]
        key : [string|int]
        default_value : [string]
    Output: Value assigned to the 'key' in the 'dictionary' (or the
    default value if the key is absent)
    """
    return dictionary.get(key, default_value)
def handle_keyword_duplicates(item):
    """
    Sometimes keywords are combined together: "Condition, Context,
    Effect: ..." In this case, we remove duplicates and preserve the
    first keyword match.
    """
    seen = set()
    deduped = {}
    for key, value in item.items():
        if value not in seen:
            seen.add(value)
            deduped[key] = value
    # No duplicate values at all: hand back the original mapping.
    if len(deduped) == len(item):
        return item
    return deduped
def nbsp(value):
    """
    Avoid text wrapping in the middle of a phrase by replacing each
    ordinary space with a non-breaking space (U+00A0).
    """
    return "\xa0".join(value.split(" "))
def get_pymofa_id(parameter_combination):
    """
    Get a unique ID for a `parameter_combination`.

    Parameters
    ----------
    parameter_combination : tuple
        The combination of Parameters
    Returns
    -------
    ID : string
        unique ID or pattern (dots become 'o', separators become '-',
        shell-hostile characters are stripped)
    """
    text = str(parameter_combination)[1:-1]  # drop the tuple brackets
    text = text.replace(", ", "-").replace(".", "o").replace("'", "")
    # Strip any remaining characters that could interfere with bash
    # scripting or wildcard usage.
    for hostile in "[]()^ #%&!@:+={}'~":
        text = text.replace(hostile, "")
    return text
def get_geoindex(geo_matrix, index_pixel):
    """ Return the geo-coordinates for a pixel's image coordinates.
    Args:
        geo_matrix: tuple of 2 lists mapping image coordinates to
            geo-coordinates (x lookup first, then y lookup).
        index_pixel: tuple with the pixel's (x, y) image coordinates.
    Returns:
        (x_geo, y_geo) tuple of geo-coordinates for the pixel.
    """
    if geo_matrix is None or index_pixel is None:
        raise TypeError("NoneType value in one of the arguments")
    if not isinstance(geo_matrix, tuple):
        raise TypeError('Please provide a tuple for geo_matrix argument.')
    if not isinstance(index_pixel, tuple):
        raise TypeError('Please provide a tuple for index_pixel.')
    x_geo = geo_matrix[0][index_pixel[0]]
    # NOTE(review): the original also computed len(geo_matrix[1]) for a
    # commented-out "flip the y axis" variant; the active code indexes
    # the y lookup directly.
    y_geo = geo_matrix[1][index_pixel[1]]
    return (x_geo, y_geo)
def map_bound(value, in_low, in_high, out_low, out_high):
    """Linearly map ``value`` from [in_low, in_high] to
    [out_low, out_high], clamping at both ends."""
    if value <= in_low:
        return out_low
    if value >= in_high:
        return out_high
    # http://stackoverflow.com/a/5650012/574981
    return out_low + (out_high - out_low) * (value - in_low) / (in_high - in_low)
def cndexp(condition, truevalue, falsevalue):
    """Simulates a conditional expression known from C or Python 2.5.
    :Parameters:
        condition : any
            Tells what should be returned.
        truevalue : any
            Value returned if condition evaluates to True.
        falsevalue : any
            Value returned if condition evaluates to False.
    :return: Either truevalue or falsevalue depending on condition.
    :rtype: same as type of truevalue or falsevalue
    """
    return truevalue if condition else falsevalue
def _convert_platform_to_omahaproxy_platform(platform):
"""Converts platform to omahaproxy platform for use in
get_production_builds_info."""
platform_lower = platform.lower()
if platform_lower == 'windows':
return 'win'
return platform_lower |
def in_bisect(word_list, target):
    """ Takes a sorted word list and checks for presence of target word
    using iterative bisection search."""
    lo, hi = 0, len(word_list)
    while True:
        mid = lo + (hi - lo) // 2
        if target == word_list[mid]:
            return True
        if hi - lo <= 1:
            return False
        if target < word_list[mid]:
            hi = mid
        else:
            lo = mid
def check_detection_overlap(gs, dd):
    """
    Evaluates if two detections overlap
    Paramters
    ---------
    gs: list
        Gold standard detection [start,stop]
    dd: list
        Detector detection [start,stop]
    Returns
    -------
    overlap: bool
        Whether two events overlap.
    """
    # Overlap iff dd's stop or start falls inside gs, or dd fully
    # contains gs.
    return (
        gs[0] <= dd[1] <= gs[1]
        or gs[0] <= dd[0] <= gs[1]
        or (dd[0] <= gs[0] and dd[1] >= gs[1])
    )
def manhattan_distance(pos1: tuple, pos2: tuple):
    """
    Compute Manhattan distance between two points
    :param pos1: Coordinate of first point
    :param pos2: Coordinate of second point
    :return: Manhattan distance between two points
    """
    return sum(abs(coord - pos2[axis]) for axis, coord in enumerate(pos1))
def get_partial_dict(prefix, dictionary, container_type=dict):
    """Given a dictionary and a prefix, return a container with just the
    items whose keys start with 'prefix.', with that prefix stripped::
        get_partial_dict('prefix', {'prefix.xyz':1, 'prefix.zyx':2, 'xy':3})
    would return::
        {'xyz':1,'zyx':2}
    Raises AttributeError(prefix) when nothing matches.
    """
    needle = prefix + "."
    cut = len(needle)
    filtered = container_type(
        (key[cut:], value)
        for key, value in dictionary.items()
        if key.startswith(needle)
    )
    if not filtered:
        raise AttributeError(prefix)
    return filtered
def parse_phot_header(header):
    """ Parses my photometry file header.
    Parameters
    ----------
    header : dictionary
        The header of photometry file, with parallel 'key' and 'val' lists.
    Returns
    -------
    dict mapping each lower-cased key to its value.
    """
    return {key.lower(): val
            for key, val in zip(header['key'], header['val'])}
def rough_calibration(pis, mission):
    """Make a rough conversion betwenn PI channel and energy.
    Only works for NICER, NuSTAR, and XMM.
    Parameters
    ----------
    pis: float or array of floats
        PI channels in data
    mission: str
        Mission name
    Returns
    -------
    energies : float or array of floats
        Energy values
    Examples
    --------
    >>> rough_calibration(0, 'nustar')
    1.6
    >>> # It's case-insensitive
    >>> rough_calibration(1200, 'XMm')
    1.2
    >>> rough_calibration(10, 'asDf')
    Traceback (most recent call last):
        ...
    ValueError: Mission asdf not recognized
    >>> rough_calibration(100, 'nicer')
    1.0
    """
    # (slope, offset) per mission for the linear PI -> keV conversion.
    calibration = {
        "nustar": (0.04, 1.6),
        "xmm": (0.001, 0.0),
        "nicer": (0.01, 0.0),
    }
    key = mission.lower()
    if key not in calibration:
        raise ValueError(f"Mission {key} not recognized")
    slope, offset = calibration[key]
    return pis * slope + offset
def flatten_corner(corner_kick, game_id):
    """Flatten the schema of a (ck_id, ck_data) corner kick pair into a
    single flat dict keyed for the given game."""
    ck_id, ck_data = corner_kick
    coords = ck_data['coord']
    flat = {
        'game_id': game_id,
        'ck_id': ck_id,
        'time_of_event(min)': ck_data['t']['m'] + ck_data['t']['s'] / 60,
        'player_id': float(ck_data['plyrId']),
    }
    for point in ('1', '2'):
        for axis in ('x', 'y', 'z'):
            flat[f'ck_coord_{axis}{point}'] = coords[point][axis]
    return flat
def add_filter_names(headerlist, filter_names, filter_labels, filters):
    """
    Add a set of filter header labels (i.e. niriss_f090w_magnitude for example)
    to a list, by matching filter names.
    Parameters
    ----------
    headerlist : list
        An existing (possibly empty) list to hold the header string for the
        output magnitudes; modified in place.
    filter_names : list
        The list of available filter names to match to
    filter_labels : list
        The corresponding list of filter labels
    filters : list
        The list of filter names to match (case-insensitive), or an empty
        list or None to get all available filter labels
    Returns
    -------
    headerlist : list
        The revised list of labels with the requested filter labels appended
    """
    try:
        n1 = len(filters)
    except TypeError:
        # filters is None or otherwise unsized — treat as "no selection".
        # (The original used a bare ``except`` here.)
        n1 = 0
    if n1 == 0:
        # No specific filters requested: append every available label.
        headerlist.extend(filter_labels)
        return headerlist
    for wanted in filters:
        for name, label in zip(filter_names, filter_labels):
            if wanted.lower() == name.lower():
                headerlist.append(label)
    return headerlist
def transform_y(transform, y_value):
    """Applies a transform matrix's y-scale (element [1][1]) to a y coordinate,
    rounding to the nearest int."""
    scale = transform[1][1]
    return int(round(scale * y_value))
def mod(x, y):
    """
    Mappable element-wise modulo function
    :param x: sequence of dividends
    :param y: sequence of divisors
    :return: list of x[i] % y[i]
    """
    return [dividend % divisor for dividend, divisor in zip(x, y)]
def get_soloed_layers(layers):
    """Given a list of layers, return only those that are soloed
    :param layers: list of layers to filter
    :type layers: list
    :return: list of layers that are soloed.
    :rtype: list
    """
    soloed = []
    for layer in layers:
        if layer.get_soloed():
            soloed.append(layer)
    return soloed
def fix_frequency(frequency):
    """
    Fixes the frequency format for RS-UV3 by removing the decimal point.
    """
    return ''.join(frequency.split('.'))
def clean_list(string):
    """
    Strip optional characters from list for sake of comparing in tests

    :param string: the list to clean represented as a string
    :return: the cleaned list as a string (no brackets, outer whitespace removed)
    """
    for bracket in ('[', ']'):
        string = string.replace(bracket, '')
    return string.strip()
def extract_prefix(kwords):
    """Converts dict of {w:count} to {w[:-1]:{w[-1]:count}}

    Each key ``w`` is assumed to be a non-empty string (``w[-1]`` would
    raise IndexError on an empty key — TODO confirm callers guarantee this).
    If two keys share both prefix and final character, the count iterated
    last wins, matching the original behaviour.
    """
    result = {}
    for w, count in kwords.items():
        # setdefault replaces the original membership-test-then-insert
        # dance; the old code also did a dead store (curr[suffix] = {}
        # immediately overwritten by curr[suffix] = count).
        result.setdefault(w[:-1], {})[w[-1]] = count
    return result
def cubic_easeinout(pos):
    """
    Easing function for animations: Cubic Ease In & Out

    Accelerates cubically for pos < 0.5, then decelerates symmetrically.
    """
    if pos >= 0.5:
        shifted = 2 * pos - 2
        return 0.5 * shifted ** 3 + 1
    return 4 * pos ** 3
def _unescape_xml(xml):
""" Replace escaped xml symbols with real ones. """
return xml.replace('<', '<').replace('>', '>').replace('"', '"') |
def calcp(kA, kV, cosphi = 1.0):
    """ Calculates three phase P in MW from kA and kV
    (P = sqrt(3) * V * I * cos(phi)); takes an optional input power factor"""
    sqrt_three = 3 ** 0.5
    return sqrt_three * kA * kV * cosphi
def filter(inputstr):
    """ Trim the last character of the first element of inputstr and
    replace NaN/Inf tokens by a large sentinel value (2147483647,
    i.e. INT32_MAX).

    NOTE: shadows the builtin filter(); name kept for callers.
    """
    trimmed = inputstr[0][:-1]
    for token in ('NaN', 'Inf'):
        trimmed = trimmed.replace(token, '2147483647')
    return trimmed
def _is_sld(model_info, id):
"""
Return True if parameter is a magnetic magnitude or SLD parameter.
"""
if id.startswith('M0:'):
return True
if '_pd' in id or '.' in id:
return False
for p in model_info.parameters.call_parameters:
if p.id == id:
return p.type == 'sld'
# check through kernel parameters in case it is a named as a vector
for p in model_info.parameters.kernel_parameters:
if p.id == id:
return p.type == 'sld'
return False |
def _human_readable(size_in_bytes):
"""Convert an integer number of bytes into human readable form
E.g.
_human_readable(500) == 500B
_human_readable(1024) == 1KB
_human_readable(11500) == 11.2KB
_human_readable(1000000) ==
"""
if size_in_bytes < 1024:
return "{}B".format(size_in_bytes)
ctr = -1
while True:
if size_in_bytes / 1024.0 < 1:
break
size_in_bytes /= 1024.0
ctr += 1
size_grps = ['KB', 'MB', 'GB', 'TB', 'PB']
return "{:.2f}{}".format(size_in_bytes, size_grps[ctr]) |
def dict_to_cidr(obj):
    """
    Take a dict of a Network object and return a cidr-formatted string
    ("<network_address>/<prefix_length>").

    :param obj:
        Dict of a Network object; must contain the keys
        'network_address' and 'prefix_length'.
    """
    return '{}/{}'.format(obj['network_address'], obj['prefix_length'])
def wrap_list(item):
    """
    Returns an object as a list.

    A list is returned unchanged (same object); a tuple or set is
    converted to a new list; None becomes an empty list; any other
    object is wrapped in a single-element list.
    """
    if item is None:
        return []
    if isinstance(item, (tuple, set)):
        return list(item)
    return item if isinstance(item, list) else [item]
def add_to_visited_places(x_curr: int, y_curr: int, visited_places: dict) -> dict:
    """
    Increment the visit count for cell (x_curr, y_curr).

    visited_places[(x, y)] holds the number of times cell (x, y) has
    been visited; a cell seen for the first time is recorded with 1.

    Args:
        x_curr: x coordinate of current position
        y_curr: y coordinate of current position
        visited_places: dict mapping (x, y) tuples to visit counts

    Returns:
        visited_places: the same dict, updated in place
    """
    cell = (x_curr, y_curr)
    visited_places[cell] = visited_places.get(cell, 0) + 1
    return visited_places
def get_renamed_pool5_vgg_weight_keys(curr_model_state_dict, saved_ckpt_dict):
    """
    Load backbone conv weights from a traditional bernoulli dropout model
    into an x* heteroscedastic dropout model. These weights extend from conv1 to pool5.

    Keys named 'x_conv_layers...' in the current model are matched to
    'features...' keys in the saved checkpoint; unmatched keys are
    reported and skipped.
    """
    renamed = {}
    for model_key in curr_model_state_dict:
        ckpt_key = model_key.replace('x_conv_layers', 'features')
        if ckpt_key not in saved_ckpt_dict:
            print( 'key not in saved ckpt dict: ', model_key )
            continue
        print(' loaded weight for %s as %s ' % (model_key, ckpt_key) )
        renamed[model_key] = saved_ckpt_dict[ckpt_key]
    return renamed
def is_written_by(author_first_name, author_last_name, paper):
    """
    Judge whether the scraped paper belongs to the given author or not:
    True only if the paper text is non-empty and contains both the
    first and last name as substrings.

    :param author_first_name: string. First name of the author.
    :param author_last_name: string. Last name of the author.
    :param paper: string. arXiv papers scraped online.
    """
    has_both_names = (paper.find(author_first_name) >= 0
                      and paper.find(author_last_name) >= 0)
    return bool(has_both_names and len(paper) > 0)
def _manage_words(words, save_to=None):
"""just return or write to file"""
if save_to is None:
return words
with open(save_to, 'w+') as file:
file.write('\n'.join(words)) |
def tol(shots):
    """Numerical tolerance to be used in tests.

    shots == 0 means analytic mode: expectation values are exact, so a
    tighter tolerance applies.  Finite shots add stochastic noise, so
    the tolerance is loosened.
    """
    analytic = (shots == 0)
    if analytic:
        return {"atol": 0.01, "rtol": 0}
    return {"atol": 0.05, "rtol": 0.1}
def sst_freeze_check(insst, sst_uncertainty=0.0, freezing_point=-1.80, n_sigma=2.0):
    """
    Compare an input SST to see if it is above freezing.

    :param insst: the input SST (may be None, which passes the check)
    :param sst_uncertainty: the uncertainty in the SST value, defaults to zero
    :param freezing_point: the freezing point of the water, defaults to -1.8C
    :param n_sigma: number of sigma to use in the check
    :type insst: float
    :type sst_uncertainty: float
    :type freezing_point: float
    :type n_sigma: float
    :return: 1 if the input SST is below the freezing point by more than
        n_sigma times the uncertainty, 0 otherwise
    :return type: integer

    A hard cut-off at the freezing point would bias averages of many
    measurements near freezing too high: observational error pushes
    values randomly higher and lower, and trimming only the low tail
    skews the mean.  Widening the cut-off by n_sigma * sst_uncertainty
    is intended to mitigate that.  The freezing point itself also varies
    with salinity, hence the parameter.
    """
    assert sst_uncertainty is not None and freezing_point is not None
    # Fail (1) only when the SST is below freezing by more than
    # n_sigma times the stated uncertainty; None always passes (0).
    threshold = freezing_point - n_sigma * sst_uncertainty
    result = 1 if (insst is not None and insst < threshold) else 0
    assert result in (0, 1)
    return result
def get_decision(data, decision_value):
    """Get the decision depending on the decision value.

    The original implementation ignored ``decision_value`` and compared
    every element against a hard-coded 0.6, contradicting both the
    parameter and the docstring; the threshold is now honoured.

    :param data: iterable of comparable values (e.g. classifier scores)
    :param decision_value: threshold each element is compared against
    :return: list of bools, True where the element exceeds decision_value
    """
    return [value > decision_value for value in data]
def safe_filename(name):
    """Utility function to make a source short-name safe as a
    filename by replacing each '/' with '-'."""
    return "-".join(name.split("/"))
def attrs(gid='.', tid=None, exn=None, gbio=None, tbio=None, iid=None, bio=None):
    """Make gtf attributes (to shorten line).

    gene_id is always present; each other field is included only when
    its value is truthy.  Fields are sorted alphabetically and joined
    with '; ', with a trailing ';'.
    """
    # (template, value) pairs for the optional attributes.
    optional = (
        ('transcript_id "{}"', tid),    # transcript_id
        ('exon_number "{}"', exn),      # exon number
        ('gene_type "{}"', gbio),       # gene biotype
        ('transcript_type "{}"', tbio), # transcript biotype
        ('ID "{}"', iid),               # intergenic ID number
        ('biotype "{}"', bio),          # intergenic biotype
    )
    fields = ['gene_id "{}"'.format(gid)]
    fields.extend(tmpl.format(val) for tmpl, val in optional if val)
    return '; '.join(sorted(fields)) + ';'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.