content stringlengths 42 6.51k |
|---|
def merge_dicts(*args):
    """
    Return a new dictionary merging those passed as arguments.

    Later dictionaries win on key collisions; no input is mutated.
    Returns {} when called with no arguments.
    """
    if not args:
        return {}
    mappings = iter(args)
    merged = next(mappings).copy()
    for mapping in mappings:
        merged.update(mapping)
    return merged
def get_campus(text):
    """
    Check which campus is mentioned in the given text.

    A campus counts as mentioned when its three-letter acronym or (part
    of) its full name occurs as a substring of ``text``.

    Args:
        text: The text in which the occurrence of the campuses is checked.
    Returns:
        Sorted list of acronyms for all mentioned UAntwerp campuses;
        defaults to ['cmi'] when no campus is explicitly mentioned.
    """
    aliases = {
        'cde': ('cde', 'drie eiken'),
        'cgb': ('cgb', 'groenenborger'),
        'cmi': ('cmi', 'middelheim'),
        'cst': ('cst', 'stad', 'city'),
    }
    mentioned = sorted(code for code, names in aliases.items()
                       if any(name in text for name in names))
    return mentioned or ['cmi']
def transpose_shape(shape, target_format, spatial_axes):
    """Converts a tuple or a list to the correct `data_format`.

    It does so by switching the positions of its elements: for
    'channels_first' the trailing channel dimension is moved in front of
    the spatial axes; for 'channels_last' the shape is returned as-is.

    # Arguments
        shape: Tuple or list, often representing shape,
            corresponding to `'channels_last'`.
        target_format: A string, either `'channels_first'` or `'channels_last'`.
        spatial_axes: A tuple of integers — the indexes of the spatial axes.
    # Returns
        A tuple or list (matching the input type), with the elements
        permuted according to `target_format`.
    # Raises
        ValueError: if `target_format` is invalid.
    """
    if target_format == 'channels_last':
        # Input is already in the requested layout.
        return shape
    if target_format != 'channels_first':
        raise ValueError('The `data_format` argument must be one of '
                         '"channels_first", "channels_last". Received: ' +
                         str(target_format))
    permuted = (tuple(shape[:spatial_axes[0]]) + (shape[-1],)
                + tuple(shape[axis] for axis in spatial_axes))
    return list(permuted) if isinstance(shape, list) else permuted
def parse_id(string):
    """Return the trailing path segment (the UUID part) of *string*."""
    _, _, tail = string.rpartition('/')
    return tail
def de_escape_string(*items) -> list:
    """
    Removes escape symbols from strings.

    Each backslash is dropped and the character that follows it is kept
    verbatim.  A lone trailing backslash (nothing left to escape) is
    dropped; the original implementation crashed with IndexError there.
    Also rebuilt with an O(n) join instead of the quadratic
    pop(0)/string-concatenation loop.

    :param items: strings that have to be de-escaped
    :return list: list of unescaped strings
    """
    new_items = []
    for item in items:
        chars = []
        i = 0
        n = len(item)
        while i < n:
            if item[i] == '\\' and i + 1 < n:
                # Keep the escaped character, drop the backslash itself.
                chars.append(item[i + 1])
                i += 2
            elif item[i] == '\\':
                # Trailing backslash with nothing to escape: drop it.
                i += 1
            else:
                chars.append(item[i])
                i += 1
        new_items.append(''.join(chars))
    return new_items
def check_length(line, min=0, max=0):
    """Does a length check on the line.

    Params:
        line (unicode)
        min (int): inclusive lower bound; 0 disables the check
        max (int): exclusive upper bound; 0 disables the check
    Returns
        True if length is ok
    """
    length = len(line)
    if min and length < min:
        return False
    if max and length >= max:
        return False
    return True
def quote_nan(value):
    """
    Returns value in single quotes if value is not a number
    >>> quote_nan('foo')
    "'foo'"
    >>> quote_nan('1')
    '1'
    """
    try:
        float(value)
    except ValueError:
        # Not parseable as a number: wrap in single quotes.
        return "'{}'".format(value)
    return value
def get_coordinate(coordinate, change, limit):
    """Return ``coordinate + change`` clamped to the range [0, limit - 1]."""
    return max(0, min(coordinate + change, limit - 1))
def S_VAR_ASSIGN(vardec, assign, data):
    """Evaluates an S_STATEMENT node: renders "<vardec>=<data>".

    Note: ``assign`` is accepted for interface compatibility but is not
    used in the rendered output.
    """
    return "".join((vardec, "=", data))
def bio2ot_ts(ts_tag_sequence):
    """
    perform bio-->ot for ts tag sequence

    'O' tags pass through; any 'B-XXX'/'I-XXX' tag becomes 'T-XXX'.

    :param ts_tag_sequence: sequence of BIO targeted-sentiment tags
    :return: list of OT-style tags
    """
    converted = []
    for tag in ts_tag_sequence:
        if tag == 'O':
            converted.append('O')
        else:
            _, sentiment = tag.split('-')
            converted.append('T-%s' % sentiment)
    return converted
def find_duplicates(iterable):
    """Find duplicate elements in an iterable.

    Parameters
    ----------
    iterable : iterable
        Iterable to be searched for duplicates (i.e., elements that are
        repeated).

    Returns
    -------
    set
        Repeated elements in `iterable`.
    """
    # modified from qiita.qiita_db.util.find_repeated
    # https://github.com/biocore/qiita
    # see licenses/qiita.txt
    seen = set()
    repeated = set()
    for element in iterable:
        # First sighting goes to `seen`; any later one to `repeated`.
        (repeated if element in seen else seen).add(element)
    return repeated
def _get_color(color_tuple):
"""
Converts and returns the color in the following format:
In canse of a rgb tuple:
#%02x%02x%02x (i.e: #00FFCC)
In case of a rgba tuple:
rgba(color_tuple)
"""
if len(color_tuple) == 3:
return '#%02x%02x%02x' % color_tuple
return 'rgba%s' % str(color_tuple) |
def keyrange(start, end, duration=1000, **kwargs):
    """
    Returns a list of keyframe dicts sharing the same parameters.

    :param start: first frame number of the range (inclusive)
    :param end: last frame number of the range (exclusive, per ``range``)
    :param duration: time in milliseconds for every keyframe
    Any additional keyword arguments are copied into every keyframe dict.
    """
    return [dict(kwargs, frame=frame, duration=duration)
            for frame in range(start, end)]
def match(e, s):
    """Very naive NFA implementation (``*`` only, no parens or pipe).

    Prefix-matches pattern `e` against string `s` (like ``re.match``):
    '.' matches any single character and '*' repeats the preceding
    character zero or more times.  Returns True as soon as the accept
    state is reachable.  The epsilon closure is single-pass, so chained
    stars (e.g. 'a*b*' against '') keep the original naive limitation.

    Fix: the input cursor ``i`` was never advanced, so every iteration
    compared against s[0] and multi-character literals could never match.
    """
    e = e + '\0'  # '\0' terminator acts as the accept state
    nfa = set([0])
    i = 0
    while nfa:
        # Epsilon step: expand '*' transitions without consuming input.
        newnfa = set()
        for j in nfa:
            if e[j] == '\0':
                return True
            elif e[j] == '*':
                newnfa.add(j - 1)
                newnfa.add(j + 1)
            elif e[j + 1] == '*':
                newnfa.add(j)
                newnfa.add(j + 2)
            else:
                newnfa.add(j)
        nfa = newnfa
        # Consume step: advance states whose pattern char matches s[i].
        newnfa = set()
        for j in nfa:
            if e[j] == '\0':
                return True
            elif i < len(s) and (e[j] == '.' or e[j] == s[i]):
                newnfa.add(j + 1)
        nfa = newnfa
        i += 1  # advance the input cursor (missing in the original)
    return False
def decode(text):
    """
    Decodes the text using run-length encoding.

    A single digit following a character repeats that character that many
    times ('a3b' -> 'aaab'); characters without a trailing digit are
    copied as-is.
    """
    pieces = []
    i = 0
    n = len(text)
    while i < n:
        char = text[i]
        if i + 1 < n and text[i + 1].isdigit():
            pieces.append(char * int(text[i + 1]))
            i += 2
        else:
            pieces.append(char)
            i += 1
    return ''.join(pieces)
def is_position_legal(position, board):
    """Return whether the given (row, col) position is legal for the board.

    The row index is checked against the number of rows and the column
    index against the row width, so non-square boards are handled
    correctly (the original checked both coordinates against
    ``len(board)``).  Assumes a rectangular board.
    """
    return 0 <= position[0] < len(board) and 0 <= position[1] < len(board[0])
def rbndvi(b2, b4, b8):
    """
    Red-Blue NDVI (Wang et al., 2007).

    .. math:: RBNDVI = (b8 - (b4 + b2))/(b8 + (b4 + b2))

    :param b2: Blue.
    :type b2: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float
    :returns RBNDVI: Index value

    .. Tip::
        Wang, F. M., Huang, J. F., Tang, Y. L., Wang, X. Z. 2007. New
        vegetation index and its application in estimating leaf area index
        of rice. Rice Science 14(3), 195-203.
        doi:10.1016/S1672-6308(07)60027-4.
    """
    visible = b4 + b2
    return (b8 - visible) / (b8 + visible)
def init_loop_state(file_counter=0):
    """
    Initialize the file row counter, the (incremented) file counter, and
    the file data list pre-seeded with the CSV header row.

    Janky, I know, but this needed to be done in 2 spots.
    """
    header = ['Date', 'Weight (lb)', 'Fat mass (lb)']
    return 0, file_counter + 1, [header]
def check_methods(class_obj, *methods):
    """Check whether the listed methods are implemented in the provided class.
    Args:
        class_obj - the class object
        methods - a list of method names
    Returns:
        either True (if the methods are implemented) or NotImplemented
    """
    mro = class_obj.__mro__
    for method in methods:
        # Walk the MRO and stop at the first class that defines `method`.
        for base_class_obj in mro:
            if method in base_class_obj.__dict__:
                # A name explicitly set to None marks the method as
                # deliberately unavailable (e.g. `__hash__ = None`).
                if base_class_obj.__dict__[method] is None:
                    return NotImplemented
                break
        else:
            # for/else: no class in the MRO defines the method at all.
            return NotImplemented
    return True
def _patient_graph_id(patient_id: str) -> str:
"""
Qualify a patient ID for use in the Graph API.
"""
if '-' not in patient_id:
patient_id = f"patient-{patient_id}"
if ',' not in patient_id:
patient_id = f"{patient_id},patient"
return patient_id |
def epa_nei_url_helper(build_url, config, args):
    """
    Takes the basic url text and performs substitutions based on NEI year
    and version; returns the finished url as a single-element list.

    Unknown years get only the '__year__' substitution, leaving
    '__version__' untouched (matching the original elif chain).
    """
    versions = {
        '2017': '2017v1/2017neiApr',
        '2014': '2014v2/2014neiv2',
        '2011': '2011v2/2011neiv2',
        '2008': '2008neiv3',
    }
    year = args['year']
    url = build_url.replace('__year__', year)
    if year in versions:
        url = url.replace('__version__', versions[year])
    return [url]
def unzip_lst(lst):
    """
    unzip a list of tuples/lists to multiple lists, one per position
    """
    return [list(group) for group in zip(*lst)]
def get_openmvg_image_path(kapture_image_name: str, flatten_path: bool = False):
    """ the openmvg image sub path corresponding to the given kapture one.

    When ``flatten_path`` is True, path separators are replaced with '_'.
    """
    if flatten_path:
        return kapture_image_name.replace('/', '_')
    return kapture_image_name
def indent(n: int) -> str:
    """Return `n` four-space indents."""
    return n * "    "
def evaluateInteriorInverseBarrier(
        function, position,
        inequalityConstraints=(),
        equalityConstraints=(),
        rp=1.0):
    """Return the objective value at ``position`` plus constraint penalties.

    Inequality constraints contribute an inverse-barrier term (scaled by
    1/rp); equality constraints contribute a quadratic penalty (scaled by
    rp).  Mutable list defaults were replaced with tuples — the lists
    were shared across calls, a classic Python pitfall (behavior here is
    unchanged since they were only iterated).

    :param function: objective callable, evaluated at ``position``
    :param position: point to evaluate
    :param inequalityConstraints: iterable of callables g(position)
    :param equalityConstraints: iterable of callables h(position)
    :param rp: penalty scaling parameter
    """
    objectiveValue = function(position)
    ineq_constraint_penalty = 0
    for constraint in inequalityConstraints:
        constraint_value = constraint(position)
        # NOTE(review): this guard tests the *accumulated* penalty, not
        # constraint_value; preserved as-is, but it looks like it was
        # meant to test feasibility of the current constraint — confirm.
        if ineq_constraint_penalty <= 0:
            ineq_constraint_penalty += - 1/constraint_value
        else:
            ineq_constraint_penalty += 100*rp * constraint_value
    eq_constraint_penalty = sum(constraint(position) ** 2
                                for constraint in equalityConstraints)
    return objectiveValue + ineq_constraint_penalty/rp + rp * eq_constraint_penalty
def compute_basin(i, j, grid):
    """
    Basin starts from the low point and includes any point up, down, left,
    right as long as that point is lower than the point next to it. 9 is
    automatically not included.
    From each point that is in the basin, we have to recursively compute
    the basin to the up, down, left, and right.

    Args:
        i: row index of the starting (low) point.
        j: column index of the starting (low) point.
        grid: 2-D list of int heights; 9 acts as a basin boundary.
    Returns:
        set of (row, col) pairs belonging to the basin, including (i, j).

    Notes:
        Each directional scan stops at a 9 or a strictly lower cell;
        equal-height cells neither stop the scan nor join the basin.
        Recursion only follows strictly increasing heights, so it always
        terminates, but there is no memoisation — cells may be revisited.
    """
    basin = set([(i,j)])
    # up: scan rows above until a 9 or a strictly lower cell stops us.
    for k in range(i - 1, -1, -1):
        if grid[k][j] == 9 or grid[k][j] < grid[i][j]:
            break
        elif grid[k][j] > grid[i][j]:
            basin = basin.union(compute_basin(k, j, grid))
    # down
    for k in range(i + 1, len(grid)):
        if grid[k][j] == 9 or grid[k][j] < grid[i][j]:
            break
        elif grid[k][j] > grid[i][j]:
            basin = basin.union(compute_basin(k, j, grid))
    # left
    for k in range(j - 1, -1, -1):
        if grid[i][k] == 9 or grid[i][k] < grid[i][j]:
            break
        elif grid[i][k] > grid[i][j]:
            basin = basin.union(compute_basin(i, k, grid))
    # right
    for k in range(j + 1, len(grid[0])):
        if grid[i][k] == 9 or grid[i][k] < grid[i][j]:
            break
        elif grid[i][k] > grid[i][j]:
            basin = basin.union(compute_basin(i, k, grid))
    return basin
def get_fibonacci(n):
    """
    Return the n-th Fibonacci number (1-indexed: F(1) == F(2) == 1).

    Iterative rewrite of the original naive double recursion, which took
    exponential time; this runs in O(n) and cannot hit the recursion
    limit for large n.

    :type n: int
    :rtype: int
    """
    if n == 1 or n == 2:
        return 1
    prev, curr = 1, 1
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
def paper_doll(text):
    """
    Given a string, return a string where every character in the original
    appears three times in a row.

    :param text:str
    :return:str

    paper_doll('Hello') --> 'HHHeeellllllooo'
    paper_doll('Mississippi') --> 'MMMiiissssssiiippppppiii'
    """
    return ''.join(ch * 3 for ch in text)
def unwords(strings):
    """
    ``unwords :: [String] -> String``

    unwords is an inverse operation to words. It joins words with
    separating spaces.
    """
    separator = " "
    return separator.join(strings)
def srange(data):
    """
    Accept data (non-empty list) and return its statistical range
    (max - min).
    """
    lo, hi = min(data), max(data)
    return hi - lo
def _get_plot_title(target_name: str, last_observation_date: str,
eval_dataset_creation_date: str,
forecast_horizon: int) -> str:
"""Gets the title of the plot."""
return (
f"Comparison of metrics for predicting {target_name}. Forecast date: "
f"{last_observation_date}, forecast horizon: {forecast_horizon} days, "
f"evaluation reporting date: {eval_dataset_creation_date}.") |
def next_velocity(vel, acc, next_acc, h):
    """Return the velocity at the next time step for a particle.

    Velocity-Verlet-style update: averages the current and next
    acceleration over one timestep.

    parameters
    ----------
    vel : array
        velocity component of particle
    acc : array
        current acceleration felt by particle
    next_acc : array
        acceleration acting on particle at next time stamp
    h : float
        simulation timestep

    Note: the original kept the parameter description in a second bare
    string literal (a no-op expression statement); merged into the real
    docstring.
    """
    return vel + 0.5*h*(acc + next_acc)
def split_strip(string, delimiter=u','):
    """
    Splits ``string`` on ``delimiter``, stripping each resulting string
    and returning a list of non-empty strings.  Falsy input (None, '')
    yields an empty list.

    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    if not string:
        return []
    stripped = (piece.strip() for piece in string.split(delimiter))
    return [piece for piece in stripped if piece]
def sqrt(number):
    """
    Calculate the floored square root of a number via binary search.

    Args:
        number(int): Number to find the floored square root
    Returns:
        int: Floored Square Root, or None for negative input (the loop
        never runs).
    """
    lo, hi = 0, number
    while lo <= hi:
        mid = (lo + hi) // 2
        square = mid * mid
        if square <= number < (mid + 1) * (mid + 1):
            return mid
        if number < square:
            hi = mid
        else:
            lo = mid + 1
    return None
def get_note_message(note):
    """return a message according to note

    note is a float < 10 (10 is the highest note)

    Messages are picked from fixed 1-point score bands; negative notes
    and a perfect 10 each get their own message.
    """
    # NOTE(review): `assert` is stripped under `python -O`, so the >10
    # guard would silently disappear there.
    assert note <= 10, "Note is %.2f. Either you cheated, or pylint's \
broken!" % note
    if note < 0:
        msg = 'You have to do something quick !'
    elif note < 1:
        msg = 'Hey! This is really dreadful. Or maybe pylint is buggy?'
    elif note < 2:
        msg = "Come on! You can't be proud of this code"
    elif note < 3:
        msg = 'Hum... Needs work.'
    elif note < 4:
        msg = 'Wouldn\'t you be a bit lazy?'
    elif note < 5:
        msg = 'A little more work would make it acceptable.'
    elif note < 6:
        msg = 'Just the bare minimum. Give it a bit more polish. '
    elif note < 7:
        msg = 'This is okay-ish, but I\'m sure you can do better.'
    elif note < 8:
        msg = 'If you commit now, people should not be making nasty \
comments about you on c.l.py'
    elif note < 9:
        msg = 'That\'s pretty good. Good work mate.'
    elif note < 10:
        msg = 'So close to being perfect...'
    else:
        msg = 'Wow ! Now this deserves our uttermost respect.\nPlease send \
your code to python-projects@logilab.org'
    return msg
def compare_broken_content(broken_content_prod, broken_content_dev):
    """Compare output between 2 content_validation runs.

    Returns the dev items whose "unique_id" does not appear in the prod
    run, i.e. the newly broken content.
    """
    prod_ids = {item["unique_id"] for item in broken_content_prod}
    dev_ids = {item["unique_id"] for item in broken_content_dev}
    new_ids = dev_ids - prod_ids
    return [item for item in broken_content_dev
            if item["unique_id"] in new_ids]
def _meters_to_degrees(meters: int) -> float:
""" convert meters to approximate degrees """
# meters * 360 / (2 * PI * 6400000)
# multiply by (1/cos(lat) for longitude)
return meters * 1 / 111701 |
def _to_yaml(wrapped, instance, args, kwargs):
    """
    New in v17
    public decorator for yaml generator

    Pure pass-through: calls ``wrapped(*args, **kwargs)`` unchanged.
    The (wrapped, instance, args, kwargs) signature matches the wrapt
    wrapper convention — presumably used with ``wrapt.decorator``; the
    ``instance`` argument is unused here. TODO(review): confirm.
    """
    return wrapped(*args, **kwargs)
def opt_pol_2(state) -> int:
    """'50 or 100 or bust' policy.

    When capital is less than 50, implement a '50 or bust' policy; when
    greater than 50, implement '100 or 50' policy; when exactly 50, go
    all in.
    """
    if state == 50:
        return 50
    if state < 50:
        return min(state, 50 - state)
    return min(state - 50, 100 - state)
def get_excluded_hpo_ids_str(excluded_hpo_ids):
    """
    Formats list of hpo_ids or None to add to bq script, adds empty hpo_id.

    :param excluded_hpo_ids: List output by args parser or None
    :return: String of upper-cased hpo_ids in single quotes, comma
        separated, always ending with the empty '' entry (the lookup
        table contains an empty site).
    """
    # hpo_ids are stored uppercase in the table.
    upper_ids = [hpo_id.upper() for hpo_id in (excluded_hpo_ids or [])]
    upper_ids.append('')
    return ', '.join("'{}'".format(hpo_id) for hpo_id in upper_ids)
def check_coordinates(loc_lat: float, loc_long: float, data_lat: float, data_long: float, margin=1.0) -> bool:
    """
    Compares a pair of coordinates (latitude and longitude) to ensure
    proximity within a maximum value ('margin').

    :param loc_lat: Latitude 1
    :param loc_long: Longitude 1
    :param data_lat: Latitude 2
    :param data_long: Longitude 2
    :param margin: Maximum accepted absolute difference per coordinate.
    :return: True if both |lat1 - lat2| and |long1 - long2| are <= margin.
    :rtype: bool
    """
    lat_close = abs(loc_lat - data_lat) <= margin
    long_close = abs(loc_long - data_long) <= margin
    return lat_close and long_close
def add_none_match_to_list(combined_list: list, other_list: list) -> list:
    """Adds the non-matched rows of ``other_list`` to the combined list.

    Rows whose 'match' value equals False get every header key from the
    first combined row filled in with 0, then are appended.  Mutates (and
    returns) ``combined_list``; matched rows in ``other_list`` are also
    mutated in place.

    Args:
        combined_list (list): combined list of dicts (non-empty)
        other_list (list): rows to merge in
    Returns:
        list: combined list of dictionaries
    """
    headers = list(combined_list[0].keys())
    for row in other_list:
        if row['match'] == False:  # noqa: E712 -- deliberate: also matches 0
            for header in headers:
                row.setdefault(header, 0)
            combined_list.append(row)
    return combined_list
def n_highest_grossing_actors(actors, n):
    """
    Query helper to get the n highest grossing ACTORS.

    :param actors: Dictionary (name of actor --> Actor node)
    :param n: Number of highest grossing ACTORS to find
    :return: List of up to n actor names sorted by grossing value,
        highest first; all names when n exceeds the number of actors.
    """
    ranked = sorted(actors,
                    key=lambda name: actors[name].get_grossing_value(),
                    reverse=True)
    return ranked[:n] if n < len(ranked) else ranked
def val_cm(val):
    """
    arg val is tuple (2.0, 'mm').  Return just the number converted to cm.

    Supported units: 'cm', 'mm', 'm', 'nm'; unknown units return None
    (preserved from the original).

    Fixes two conversion-factor bugs: 1 m is 100 cm (the original
    multiplied by 10) and 1 cm is 1e7 nm, so nm -> cm divides by 1e7
    (the original divided by 1e8).
    """
    if val[1] == 'cm':
        return val[0]
    if val[1] == 'mm':
        return val[0] / 10.0
    if val[1] == 'm':
        return val[0] * 100  # 1 m == 100 cm
    if val[1] == 'nm':
        return val[0] / 1E7  # 1 cm == 1e7 nm
    return None
def extended_euclidean_algorithm(a, b):
    """
    Returns a three-tuple (gcd, x, y) such that
    a * x + b * y == gcd, where gcd is the greatest
    common divisor of a and b.

    This function implements the extended Euclidean
    algorithm and runs in O(log b) in the worst case.
    """
    # Invariants: prev_r == a*prev_s + b*prev_t and r == a*s + b*t.
    prev_r, r = a, b
    prev_s, s = 1, 0
    prev_t, t = 0, 1
    while r != 0:
        q = prev_r // r
        prev_r, r = r, prev_r - q * r
        prev_s, s = s, prev_s - q * s
        prev_t, t = t, prev_t - q * t
    return prev_r, prev_s, prev_t
def _dhms2day( d, h, m, s ):
    """
    The `_dhms2day` function converts hour, minute and second format for a given
    day (decimal or integer) into a decimal day.

    1 day == 24 h == 1440 min == 86400 s; each component is scaled to
    days and summed.
    Returns a float.
    """
    return d + h/24.0 + m/1440.0 + s/86400.0
def if_cond(condition, if_return, else_return):
    """
    Evaluate a condition to select a return value.

    @param condition Statement that evaluates to a boolean
    @param if_return Value that is returned if condition is true
    @param else_return Value that is returned if condition is false
    @return if_return or else_return, depending on condition
    """
    return if_return if condition else else_return
def hello(name: str = "World") -> str:
    """Provide a name, return a greeting."""
    return "Hello, " + name + "!"
def constrain (n, min, max):
    """This returns a number, n constrained to the min and max bounds.

    The lower bound wins when the bounds are inconsistent (min checked
    first, as in the original).
    """
    if n < min:
        return min
    return max if n > max else n
def strip_backslashes(input_string: str) -> str:
    """
    Remove leading and trailing backslashes.

    >>> strip_backslashes(r'\\test\\\\')
    'test'
    """
    return input_string.strip('\\')
def g_minority_1_dev(by_grps):
    """Passed if large group is fully consistent, and both small group
    items are different to each other and the large group letter.

    Examples:
        Large - all S, small O,N -> TRUE
        Large - all S, small S,N -> False
        Large - all S, small N,N -> False
    This behavior is because the second test case is already picked up by
    the Red test, and the third case by the first green test; avoiding
    conflicts of these labels.

    ``by_grps`` is (small_group_of_2, large_group_of_4) over the letters
    S/N/O.  Prints a diagnostic line and returns a bool.
    Fix: corrected the "Faield" typo in the third diagnostic message.
    """
    small, big = by_grps[0], by_grps[1]
    if small[0] == small[1]:
        print("Failed g_1dev_t2 -- small groups match")
        return False
    # The large group is consistent when all four members share a letter.
    big_letter = next(
        (letter for letter in ("S", "O", "N") if big.count(letter) == 4),
        None)
    if big_letter is None:
        print("Failed g_1dev_t2 -- no large group consistency")
        return False
    if big_letter in small:
        print("Failed g_1dev_t2 -- a small group member and large group letter are the same")
        return False
    print("Confirmed g_1dev_t2 -- small group with 1 deviancy and large group are different")
    return True
def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
If there are two many splits such that some slice can be empty.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices such that some splits are empty')
slices.append(slice(begin, end))
return slices |
def build_select(table, to_select, where=None):
    """
    Build a select request.

    Parameters
    ----------
    table : str
        Table where query will be directed.
    to_select: iterable
        The list of columns to select.
    where: iterable
        The list of conditions to constrain the query; each entry w
        becomes a named-parameter equality `w = :w`.

    Returns
    -------
    str
        Built query string.
    """
    columns = ", ".join(str(column) for column in to_select)
    query = "SELECT " + columns + f" FROM \"{table}\""
    if where:
        conditions = " AND ".join(f"{w} = :{w}" for w in where)
        query += " WHERE " + conditions
    return query
def startswith(text: str, starts: str) -> bool:
    """
    Template implementation of `str.startswith()`.

    Non-string ``text`` yields False instead of raising.
    """
    return isinstance(text, str) and text.startswith(starts)
def timezone_format(value):
    """ Check the value of the timezone offset and, if positive, add a plus sign.

    Non-numeric or missing values are normalized to '' rather than
    raising.  The original used a bare ``except``, which would also hide
    programming errors; narrowed to the errors ``int()`` actually raises
    for bad input.
    """
    try:
        if int(value) > 0:
            value = '+' + str(value)
    except (TypeError, ValueError):
        value = ''
    return value
def make_qualified_schema_name(schema_name: str, table_name: str) -> str:
    """produces a qualified schema name given a schema and table

    :param schema_name: the schema name
    :param table_name: the table name
    :return: a quoted dot separated qualified schema name
    """
    return f'"{schema_name}"."{table_name}"'
def eth_addr(a):
    """
    Print Mac Address in the human format

    Args:
        a: 6-byte string "6s"; bytes input is decoded as latin-1 first
    Returns:
        mac in the human format ('aa:bb:cc:dd:ee:ff')
    """
    if isinstance(a, bytes):
        a = a.decode("latin")
    return ":".join("%.2x" % ord(a[i]) for i in range(6))
def normalize_date(date):
    """Bring the date argument to a uniform format, which is YYYY-MM-DD."""
    # Drop the time portion, if any.
    date = date.partition('T')[0]
    # Insert dashes into the compact YYYYMMDD form.
    if len(date) == 8:
        date = '-'.join((date[:4], date[4:6], date[6:]))
    return date
def fullest_bank(banks):
    """
    Return the index of the first bank holding the most blocks.

    >>> fullest_bank([2, 4, 1, 2])
    1
    >>> fullest_bank([3, 1, 2, 3])
    0
    """
    most = max(banks)
    for idx, blocks in enumerate(banks):
        if blocks == most:
            return idx
def parse_s2bins(s2bins):
    """
    parse ggKbase scaffold-to-bin mapping
    - returns (scaffold->bin, bin->scaffolds) dicts

    Lines are whitespace-split as: scaffold, bin[, genome words...].
    Bins containing 'UNK' are skipped; the bin key is "<bin>\\t<genome>"
    with genome defaulting to 'n/a'.
    """
    s2b = {}
    b2s = {}
    for raw in s2bins:
        fields = raw.strip().split()
        scaffold, bin_name = fields[0], fields[1]
        if 'UNK' in bin_name:
            continue
        genome = ' '.join(fields[2:]) if len(fields) > 2 else 'n/a'
        key = '%s\t%s' % (bin_name, genome)
        s2b[scaffold] = key
        b2s.setdefault(key, []).append(scaffold)
    return s2b, b2s
def mag2mom_nm(mw):
    """Converts moment magnitude to seismic moment in newton-metres."""
    exponent = 9.05 + 1.5 * mw
    return 10 ** exponent
def intents_to_string(intents, queryset=False):
    """
    Args:
        intents: [{"action": "/application/json/view"}, ...] OR
            [models.Intent] (if queryset=True)
    Returns:
        str repr of the sorted list of intent actions, e.g.
        "['<intent.action>', ...]"
    """
    if queryset:
        actions = [intent.action for intent in intents]
    else:
        actions = [intent['action'] for intent in intents]
    return str(sorted(actions))
def make_list(obj):
    """Convert an object to a list if it is not already (lists pass through)."""
    return obj if isinstance(obj, list) else [obj]
def py_str(string):
    """Convert a UTF-8 encoded C byte string back to a Python str."""
    return str(string, 'utf-8')
def calc_more_stuff(result):
    """Do some more calculations.

    Lists get a {"more": "stuff"} element appended; dicts get a "more"
    key set.  Anything else passes through unchanged.  Mutates and
    returns ``result``.  Uses ``isinstance`` (which also accepts
    subclasses) instead of the exact ``type(...) is`` comparison.
    """
    if isinstance(result, list):
        result += [{"more": "stuff"}]
    elif isinstance(result, dict):
        result["more"] = "stuff"
    return result
def surface_for_tile(northing, easting, elevation):
    """Returns the points and faces that represent the tile boundary.

    The provided elevation is used for all four corners, so the surface
    is uniform across the tile; corner elevations from the tile data are
    not consulted.  Points are (easting, northing, elevation) triples for
    a 1x1 tile; faces are two triangles indexing into the points.

    Example
    -------
    Generate a surface for each tile in a folder:
    >>> import hgt
    >>> import hgtools
    >>> points = [
    >>>     hgt.location_hgt(path) for path in hgt.find_hgt_files(source_folder)]
    >>> for northing, easting in points:
    >>>     points, faces = surface_for_tile(northing, easting, 0.0)
    """
    west, east = easting, easting + 1
    south, north = northing, northing + 1
    points = [
        (west, south, elevation),
        (east, south, elevation),
        (east, north, elevation),
        (west, north, elevation),
    ]
    faces = [(0, 1, 2), (2, 3, 0)]
    return points, faces
def _round_to_bit(value, power):
"""Rounds the given value to the next multiple of 2^power.
Args:
value: int to be rounded.
power: power of two which the value should be rounded up to.
Returns:
the result of value rounded to the next multiple 2^power.
"""
return (((value - 1) >> power) + 1) << power |
def interpret_line(line, splitter=','):
    """
    Split text into arguments and parse each of them to an appropriate
    format (int, float or string).

    Args:
        line: text to split
        splitter: delimiter between arguments
    Returns: list of arguments (int where possible, else float, else the
        stripped string)
    """
    def _coerce(token):
        # Try the numeric casts in priority order; fall back to a
        # stripped string.
        for cast in (int, float):
            try:
                return cast(token)
            except Exception:
                pass
        return token.strip()

    return [_coerce(token) for token in line.split(splitter)]
def cipher(text, shift, encrypt=True):
    """
    Encrypt or decrypt a text with a Caesar cipher.

    It is a type of substitution cipher in which each letter in the
    plaintext is replaced by a letter some fixed number of positions down
    the alphabet.  The working alphabet is lowercase followed by
    uppercase, so shifting wraps across case boundaries ('z' + 1 -> 'A');
    characters not in the alphabet pass through unchanged.

    Parameters
    ----------
    text : string
        text waiting to be encoded.
    shift: int
        shift of positions.
    encrypt: boolean
        If True, shift down the alphabet(encrypt);
        If False, shift up the alphabet(decrypt)

    Returns
    -------
    string
        text output after encrypt or decrypt.

    Examples
    --------
    >>> cipher('A', 3, True)
    'D'
    >>> cipher('D', 3, False)
    'A'

    (The examples in the original docstring were inverted and did not
    match the implementation.)
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    new_text = ''
    for c in text:
        index = alphabet.find(c)
        if index == -1:
            # Not in the alphabet: copy through unchanged.
            new_text += c
        else:
            new_index = index + shift if encrypt == True else index - shift
            # Wrap around the 52-character alphabet.
            new_index %= len(alphabet)
            new_text += alphabet[new_index:new_index+1]
    return new_text
def repeat_inside(line: str) -> str:
    """
    Find the longest repeating substring: the longest substring of
    `line` that is some block repeated two or more times back-to-back
    (e.g. 'aaa' or 'abab').  Returns '' when no repetition exists.

    Brute force: every start index / block length pair is tried, and
    every repetition count that could still fit, so this is roughly
    O(n**4) in the length of `line`.
    """
    ans = ''
    for i in range(len(line)):
        for j in range(len(line) - i):
            # Candidate repeated block.
            s = line[i:i + j + 1]
            for repeat in range(2, len(line) // len(s) + 1):
                ls = s * repeat
                # Keep the longest repetition found anywhere in `line`.
                if (ls in line) and (len(ls) > len(ans)):
                    ans = ls
    return ans
def get_inter_area(box_a, box_b):
    """
    Args:
        box_a: (xmin, ymin, xmax, ymax)
        box_b: (xmin, ymin, xmax, ymax)
    Returns:
        intersection area between the two boxes (0 when disjoint)
    """
    left = max(box_a[0], box_b[0])
    top = max(box_a[1], box_b[1])
    right = min(box_a[2], box_b[2])
    bottom = min(box_a[3], box_b[3])
    width = max(0, right - left)
    height = max(0, bottom - top)
    return width * height
def seconds_readable(seconds: int) -> str:
    """Turn seconds as an int into 'DD:HH:MM:SS', omitting leading zero fields.

    Fix: hours are now always emitted when days are present, so e.g.
    1 day 5 minutes renders '01:00:05:00' instead of the ambiguous
    '01:05:00' the original produced (which read as 1h 5m).
    """
    parts: list[str] = []
    days, seconds = divmod(seconds, 60 * 60 * 24)
    hours, seconds = divmod(seconds, 60 * 60)
    minutes, seconds = divmod(seconds, 60)
    if days:
        parts.append(f"{days:02d}")
    if days or hours:
        # Never skip hours once days are shown, to keep fields aligned.
        parts.append(f"{hours:02d}")
    parts.append(f"{minutes:02d}")
    parts.append(f"{seconds:02d}")
    return ":".join(parts)
def str_to_bytes(obj) -> bytes:
    """Convenience wrapper around ``str.encode`` (default UTF-8)."""
    encoded = obj.encode()
    return encoded
def calculate_centroid(cluster):
    """
    Calculates the centroid of a single cluster.

    cluster is a 2-dimensional list of data points, each data point being
    a pair [x, y].  Returns the centroid as [mean_x, mean_y].
    """
    count = len(cluster)
    xs, ys = zip(*cluster)
    return [sum(xs) / count, sum(ys) / count]
def formatTwoDecimalPoints(floatNum):
    """Return a string of the float number with only two decimal points."""
    return format(floatNum, '.2f')
def OrganizeFindingsByFile(findings_list):
    """Creates dict mapping filepath to all findings in that file.

    Args:
        findings_list: a list of Finding objects.
    Returns:
        A dict. Keys are filepath strings and values are a list of
        findings associated with that filepath, in input order.
    """
    file_findings_map = {}
    for finding in findings_list:
        path = finding.file_context.filepath
        file_findings_map.setdefault(path, []).append(finding)
    return file_findings_map
def is_svg(url) -> bool:
    """
    Determine if the url points to an svg image.

    Parameters
    ----------
    url : str
        Target url.

    Returns
    -------
    True when the url ends with '.svg', False otherwise.
    """
    return url.endswith(".svg")
def product_vector3_3x3(vector,matrix):
    """Multiply a 3-vector by a square 3x3 matrix: returns matrix @ vector
    as the list [x, y, z]."""
    return [
        sum(matrix[row][col] * vector[col] for col in range(3))
        for row in range(3)
    ]
def _is_a_url(input_element):
"""
-----
Brief
-----
Auxiliary function responsible for checking if the input is a string that contains an url.
-----------
Description
-----------
Some biosignalsnotebooks functions support a remote access to files. In this situation it is
important to understand if the input is an url, which can be easily achieved by checking if some
key-markers are present.
The key-markers that will be searched are "http://", "https://", "www.", ".pt", ".com", ".org",
".net".
----------
Parameters
----------
input_element : unknown
The data structure that will be checked.
Returns
-------
out : bool
If the input_element is a string and if it contains any key-marker, then True flag will be returned.
"""
if type(input_element) is str:
# Check if signal_handler is a url.
# [Statements to be executed if signal_handler is a url]
if any(mark in input_element for mark in ["http://", "https://", "www.", ".pt", ".com",
".org", ".net"]):
return True
else:
return False
else:
return False |
def card_html_id(card):
    """Return HTML id for element containing given card ('c' + zero-padded number)."""
    return 'c' + format(card, '02d')
def next_power_of_two(x):
    # type: (int) -> int
    """
    Compute the next power of two that is strictly greater than `x`:
    >>> next_power_of_two(0)
    1
    >>> next_power_of_two(1)
    2
    >>> next_power_of_two(2)
    4
    >>> next_power_of_two(3)
    4
    >>> next_power_of_two(4)
    8
    """
    shift = 1
    # Smear the high bit downwards until x has the form 2**k - 1.
    while x & (x + 1):
        x |= x >> shift
        shift <<= 1
    return x + 1
def get_valid_segment(text):
    """ Returns None or the valid Loki-formatted urn segment for the given input string.

    Valid characters are alphanumerics plus '.', '_' and '-'; everything
    else is removed.  The empty string maps to None.
    """
    if text == '':
        return None
    allowed = {'.', '_', '-'}
    return ''.join(char for char in text
                   if char in allowed or char.isalnum())
def by_rank(line):
    """
    Adds the rankings of each program
    :param line:
    :return:
    """
    # Rankings sit at every third position; the final element is excluded.
    total = 0
    count = 0
    for idx in range(0, len(line[:-1]), 3):
        entry = line[idx]
        # '--' marks a missing ranking, which is skipped entirely.
        if '--' not in entry:
            count += 1
            total += int(entry)
    return total / count
def fileNaming(names):
    """
    Since two files cannot have equal names,
    the one which comes later will have an addition
    to its name in a form of (k), where k is the smallest
    positive integer such that the obtained name is not
    used yet. Return an array of names that will be given
    to the files.
    """
    new_file_names = []
    # Mirror the output list in a set so each membership probe is O(1)
    # instead of a linear scan of the result built so far.
    taken = set()
    for name in names:
        if name in taken:
            k = 1
            while "{}({})".format(name, k) in taken:
                k += 1
            name = "{}({})".format(name, k)
        new_file_names.append(name)
        taken.add(name)
    return new_file_names
def get_absolute_url(base_url: str, relative_url: str) -> str:
    """
    Get absolute url for relative_url with given base_url.
    :param base_url: base page url
    :param relative_url: list of relative urls
    :return: absolute url
    """
    result = relative_url
    # Protocol-relative links ("//host/...") lose the leading slashes.
    if result.startswith('//'):
        result = result[2:]
    # Anything that is not root-relative is returned unchanged.
    if not result.startswith('/'):
        return result
    # Join, avoiding a doubled slash at the boundary.
    prefix = base_url[:-1] if base_url.endswith('/') else base_url
    return prefix + result
def parse_gpu_ids(gpu_str):
    """Parse gpu ids from configuration.
    Args:
        gpu_str: string with comma-separated gpu indexes (e.g. "0,1").
            A single negative index (e.g. "-1") means "no GPUs".
    Return:
        a list where each element is the gpu index as int
    """
    gpus = []
    if gpu_str:
        parsed_gpus = gpu_str.split(",")
        # Bug fix: the original tested `gpus` (always empty at this point)
        # instead of `parsed_gpus`, so the single-negative-index sentinel
        # was never filtered out and e.g. "-1" returned [-1].
        if len(parsed_gpus) != 1 or int(parsed_gpus[0]) >= 0:
            return [int(p) for p in parsed_gpus]
    return gpus
def extract_entities(input_data_tokens, entity_dict):
    """Extracts valid entities present in the input query.
    Parses the tokenized input list to find valid entity values, based
    on the given entity dataset.
    Args:
        input_data_tokens: A list of string tokens, without any punctuation,
            based on the input string.
        entity_dict: A dictionary of dictionary, of entity values for a
            particular entity type. NOTE(review): the integer key ``1``
            inside a nested dict appears to mark the end of a complete
            entity value -- confirm against the dataset builder.
    Returns:
        A list of valid entity values and their start, stop token index
        locations in the tokenized input query.
        [(['comedy', 'action'], 5, 7), (['suspense'], 9, 10)]
        Always returns a list. If no valid entities are detected, returns
        an empty list.
    """
    detected_entities = []
    length = len(input_data_tokens)
    for i, word in enumerate(input_data_tokens):
        if word in entity_dict:
            start = i
            stop = -1  # -1 means "no complete entity seen yet"
            loc = i  # keeps track of the current cursor position
            current_dict = entity_dict
            # keeps track of the current dictionary data
            # loc may run one past the last token so the final match is flushed
            while(loc <= length and current_dict):
                if 1 in current_dict:
                    # tag index of a potential entity value if a
                    # longer entity is not present
                    stop = loc
                    if len(current_dict) == 1:
                        # the end-marker is the only entry: no longer entity
                        # can follow, so record the match immediately
                        detected_entities.append(
                            (input_data_tokens[start:stop], start, stop)
                        )
                        stop = -1  # reset
                # if end of query reached or mismatch in entity values,
                # discard and move on to the next word
                if loc == length or input_data_tokens[loc] not in current_dict:
                    # save a shorter entity, if it exists in the already \
                    # parsed query
                    if stop != -1:
                        detected_entities.append(
                            (input_data_tokens[start:stop], start, stop)
                        )
                    break
                else:
                    # entity matches up until current word: descend one level
                    # in the nested entity dictionary and advance the cursor
                    current_dict = current_dict[input_data_tokens[loc]]
                    loc += 1
    return detected_entities
def get_port_detail(ports):
    """
    Iterate over ports details from response and retrieve details of ports.
    :param ports: list of ports details from response
    :return: list of detailed element of ports
    :rtype: list
    """
    details = []
    for port in ports:
        # Missing keys fall back to an empty string rather than raising.
        details.append({
            'ID': port.get('id', ''),
            'Number': port.get('number', ''),
        })
    return details
def nick_match_tellee(nick, tellee):
    """Tell if a ``nick`` matches a ``tellee``.
    :param str nick: Nick seen by the bot
    :param str tellee: Tellee name or pattern
    Matching is always case-insensitive::
        >>> nick_match_tellee('Exirel', 'exirel')
        True
        >>> nick_match_tellee('exirel', 'EXIREL')
        True
        >>> nick_match_tellee('exirel', 'dgw')
        False
    A ``tellee`` that ends with ``*`` or ``:`` is a wildcard pattern:
    ``nick`` matches when it starts with ``tellee`` minus the token::
        >>> nick_match_tellee('Exirel', 'Exi*')
        True
        >>> nick_match_tellee('Exirel', 'exi:')
        True
        >>> nick_match_tellee('Exirel', 'Exi')
        False
    """
    folded_nick = nick.lower()
    folded_tellee = tellee.lower()
    if tellee[-1] in ('*', ':'):
        # Wildcard pattern: prefix match once the trailing token(s) are gone.
        return folded_nick.startswith(folded_tellee.rstrip('*:'))
    # Plain name: exact (case-insensitive) equality.
    return folded_nick == folded_tellee
def set_and_true(key, _dict):
    """Is key in dict and value True?
    Args:
        key (str): Key to lookup in dictionary.
        _dict (dict): The dictionary.
    Returns:
        bool: Is key in dict and value True?
    """
    # get() returns None for a missing key, and ``None is True`` is False,
    # so a single identity check covers both conditions.
    return _dict.get(key) is True
def filter_by_id(result_list_to_filter, filtering_id_field, desired_id):
    """ Given a list of results, returns only the ones that are tied to a given ID.
    Args:
        result_list_to_filter (list): list of dictionaries, containing data about entries.
        filtering_id_field: The name of the field containing the IDs to filter.
        desired_id: The ID to keep when filtering.
    Returns:
        list: A copy of the input list, containing only entries with the desired ID.
    """
    kept = []
    for entry in result_list_to_filter:
        if entry[filtering_id_field] == desired_id:
            kept.append(entry)
    return kept
def flatten(lst):
    """Returns a flattened version of lst.
    >>> flatten([1, 2, 3]) # normal list
    [1, 2, 3]
    >>> x = [1, [2, 3], 4] # deep list
    >>> flatten(x)
    [1, 2, 3, 4]
    >>> x = [[1, [1, 1]], 1, [1, 1]] # deep list
    >>> flatten(x)
    [1, 1, 1, 1, 1, 1]
    """
    # Base case: a non-list value is wrapped in a singleton list.
    if type(lst) != list:
        return [lst]
    # Recursive case: flatten each element and concatenate the results;
    # atoms come back as [atom], so one code path handles both.
    result = []
    for item in lst:
        result.extend(flatten(item))
    return result
def viewitems(d):
    """Return either the items or viewitems method for a dictionary.
    Args:
        d (:obj:`dict`): A dictionary.
    Returns:
        view method: Either the items or viewitems method.
    """
    # Python 2 dicts expose viewitems(); fall back to items() (Python 3).
    return getattr(d, "viewitems", d.items)()
def assign_min_points(num_points: int) -> int:
    """ Improves runtime speed without impacting scores. set to 1 if willing to wait. """
    # One point per thousand, with a floor of 1.
    scaled = num_points // 1000
    return scaled if scaled > 1 else 1
def _get_md_relative_link(id_: str, title: str) -> str:
"""Returns a representation of a zettel that is a relative Markdown link.
Asterix at the beginning is a Markdown syntax for an unordered list, as links to
zettels are usually just used in references section of a zettel.
"""
return f"* [{id_}](../{id_}) {title}" |
def divide2(a, b):
    """use ``try... except PossibleException... except`` clause.
    List all possible exception you can imagine, and leave other unpredictable
    exception to ``except clause``.
    """
    try:
        # ``* 1.0`` forces float division even under Python 2 semantics.
        return a * 1.0 / b
    except ZeroDivisionError:
        # Surface the expected failure as a ValueError for callers.
        raise ValueError("Zero division Error!")
    except Exception:
        # Bare ``raise`` re-raises the active exception with its original
        # traceback; the previous ``raise e`` rewrote the traceback origin.
        raise
def decode(token):
    """Un-escape special characters in a token from a path representation.
    :param str token: The token to decode
    :return: The decoded string
    :rtype: str
    """
    # Order matters: un-escape slashes first, then collapse escaped
    # backslashes, so a freshly produced '\' cannot pair into a new '\\'.
    unescaped = token.replace('\\/', '/')
    return unescaped.replace('\\\\', '\\')
def rearrange_digits(input_list):
    """
    Rearrange Array Elements so as to form two number such that their sum is maximum.

    Dealing digits in descending order alternately between the two numbers
    gives each number the largest possible leading digits, which maximizes
    the sum.

    Args:
        input_list(list): Input List of single-digit integers
    Returns:
        (int),(int): Two maximum sums, or None (with a message) for
        invalid input.
    """
    # Check the type before calling len(): the original order raised
    # TypeError for non-sized inputs instead of reporting invalid input.
    if type(input_list) != list or len(input_list) == 0:
        print("INVALID INPUT")
        return
    # The stdlib sort replaces the hand-rolled merge sort (same O(n log n),
    # far less code); descending order lets us deal digits left to right.
    digits = sorted(input_list, reverse=True)
    first = ''.join(str(d) for d in digits[0::2])
    second = ''.join(str(d) for d in digits[1::2])
    # A single-element list leaves the second number empty; treat it as 0
    # instead of letting int('') raise ValueError.
    return [int(first), int(second) if second else 0]
def get_attribute_components(attribute, vars_only=True):
    """
    Gets component names out of an attribute
    Components starting with '?' are variables; names starting with '_' are
    hidden and never returned. With vars_only=True (the default) only
    variables are collected.
    >>> from pprint import pprint
    >>> attr = ('a', ('sub1', '?c1'))
    >>> get_attribute_components(attr)
    {'?c1'}
    >>> attr = '?c1'
    >>> get_attribute_components(attr)
    {'?c1'}
    >>> attr = ('a', ('sub1', 'c1'))
    >>> get_attribute_components(attr)
    set()
    >>> attr = 'c1'
    >>> get_attribute_components(attr)
    set()
    """
    names = set()
    # When not restricted to variables, the whole attribute is itself a
    # component unless hidden (note: for a tuple, attribute[0] is its first
    # element, so the tuple is kept when that element is not '_').
    if vars_only is not True and attribute[0] != '_':
        names.add(attribute)
    if isinstance(attribute, tuple):
        # Relational attribute: examine every element.
        for ele in attribute:
            if isinstance(ele, tuple):
                # Nested relation: recurse and merge its component names.
                for name in get_attribute_components(ele, vars_only):
                    names.add(name)
            else:
                # Keep the element if it is a variable ('?'-prefixed) or,
                # when vars_only is disabled, any non-empty, non-hidden name
                # that is not the '_' wildcard.
                if ((vars_only is not True or (len(ele) > 0 and ele[0] == '?'))
                    and (ele != '_' and len(ele) > 0 and ele[0] != '_')):
                    names.add(ele)
    # Flat (string) attribute: keep it if it is a variable or, when
    # vars_only is disabled, any non-hidden name.
    elif ((vars_only is not True and attribute[0] != '_') or
          attribute[0] == '?'):
        names.add(attribute)
    return names
def _int_or_none(value):
"""Helper: return an integer or ``None``."""
if value is not None:
value = int(value)
return value |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.