content stringlengths 42 6.51k |
|---|
def as_path_change_cl(paths):
    """Flag indices where an AS path change is certain (i.e. not explainable
    by timeout, private addresses, reachability problems, etc.).

    Args:
        paths (list of list): each element is a list of ASNs.

    Returns:
        list of int: 1 at indices where a genuine AS path change occurred, else 0.
    """
    flags = [0] * len(paths)
    for idx in range(1, len(paths)):
        prev, curr = paths[idx - 1], paths[idx]
        if not prev or not curr:
            continue
        # same destination AS but a different path -> rules out reachability issues
        if curr[-1] == prev[-1] and curr != prev:
            differing = set(curr) ^ set(prev)
            # only flag when every differing hop is a valid (integer) ASN
            if differing and all(type(asn) is int for asn in differing):
                flags[idx] = 1
    return flags
def clean(string: str) -> str:
    """Strip surrounding whitespace from a field name.

    Some field headers have spaces appended to the end of the cells;
    ``str.strip`` removes those (and any leading whitespace as well).
    """
    stripped = string.strip()
    return stripped
def merge_dict(dict1, dict2):
    """Recursively merge ``dict2`` into ``dict1`` and return ``dict1``.

    Nested dicts are merged key by key; any non-dict value in ``dict2``
    overwrites the corresponding entry in ``dict1``.

    Args:
        dict1: The base dictionary (mutated in place).
        dict2: The dictionary to merge.
    Returns:
        The merged dictionary ``dict1``.
    """
    for key in dict2:
        incoming = dict2[key]
        if isinstance(incoming, dict):
            dict1[key] = merge_dict(dict1.get(key, {}), incoming)
        else:
            dict1[key] = incoming
    return dict1
def conn_from_flowtuple(ft):
    """Convert the flow tuple into a dictionary (suitable for JSON)."""
    keys = ("src", "sport", "dst", "dport", "offset", "time")
    return dict(zip(keys, ft))
def get_ecr_vulnerability_package_name(vulnerability):
    """Return the package name from an ECR Scan Tool vulnerability object.

    :param vulnerability: dict JSON object in the format produced by the
        ECR Scan Tool (contains an "attributes" list of key/value dicts)
    :return: str package name, or None when no "package_name" attribute exists
    """
    return next(
        (attr["value"] for attr in vulnerability["attributes"]
         if attr["key"] == "package_name"),
        None,
    )
def mean(values):
    """Average the non-None entries of *values*.

    ``None`` entries are ignored.  Returns 0 when there are no usable values
    (previously this raised ZeroDivisionError for empty or all-None input).

    Example:
        >>> mean([1, 2, 3, 4, 4])
        2.8
    """
    non_nones = [x for x in values if x is not None]
    if not non_nones:
        return 0
    return sum(non_nones) / len(non_nones)
def _slice_axis(shape, slc):
"""
Return a 2-tuple of which axes in a dataset lie along the x and y axes of
the image.
Parameters
----------
shape : tuple
Shape of original data.
slc : tuple
Slice through the data, 'x', and 'y'
"""
return slc.index('x'), slc.index('y') |
def perimetro(lado_1, lado_2, lado_3):
    """
    (num, num, num) -> num
    Compute the perimeter of a triangle from its three side lengths.
    >>> perimetro(1, 2, 3)
    6
    >>> perimetro(2, 2, 2)
    6
    >>> perimetro(3, 3, 3)
    9
    :param lado_1: first side
    :param lado_2: second side
    :param lado_3: third side
    :return: num, the triangle's perimeter
    """
    return sum((lado_1, lado_2, lado_3))
def process_sprite_group(canvas, sprite_group, method):
    """Draw or update every Sprite in *sprite_group*.

    When *method* is "draw", each sprite's draw(canvas) is called; otherwise
    each sprite's update() is called and sprites whose update() returns a
    truthy value are removed from the group.  A copy of the group is
    iterated so removals do not disturb the iteration.
    """
    for sprite in set(sprite_group):
        if method == "draw":
            sprite.draw(canvas)
        elif sprite.update():
            sprite_group.remove(sprite)
    return None
def add_suffix(name: str, suffix: str):
    """Append ``_suffix`` to *name*; return *name* unchanged when *suffix* is empty."""
    return f'{name}_{suffix}' if suffix else name
def get_show_columns(database):
    """
    Build the information_schema query listing table and column names for a database.

    NOTE(review): the schema name is interpolated directly into the SQL text;
    callers must ensure *database* is trusted, or prefer a parameterized query.

    :type str
    :param database: A database (schema) name
    :rtype str
    :return A query
    """
    template = (
        "SELECT `TABLE_NAME`, `COLUMN_NAME` "
        " FROM "
        "`information_schema`.`COLUMNS`"
        " WHERE "
        "`TABLE_SCHEMA` = '{:s}'"
    )
    return template.format(database)
def is_multiplicable(number, multiplicable):
    """Tell whether *number* is evenly divisible by *multiplicable*.

    Args:
        number (int): The number to test.
        multiplicable (int): The divisor to check against.
    Returns:
        bool: True if divisible, False otherwise.
    """
    remainder = number % multiplicable
    return remainder == 0
def fill_cohort_config_missing(config):
    """
    If none cohort_config section is provided, include all the entities by default
    Args:
        config (dict) a triage experiment configuration
    Returns: (dict) a triage cohort config
    """
    # Sub-query template yielding (entity_id, knowledge_date) pairs for one
    # feature aggregation's source table.
    from_query = "(select entity_id, {knowledge_date} as knowledge_date from (select * from {from_obj}) as t)"
    feature_aggregations = config['feature_aggregations']
    # One sub-query per aggregation, filled from its knowledge-date column and from_obj.
    from_queries = [from_query.format(knowledge_date = agg['knowledge_date_column'], from_obj=agg['from_obj']) for agg in feature_aggregations]
    unions = "\n union \n".join(from_queries)
    # '{as_of_date}' is deliberately left as a literal placeholder here; it is
    # appended outside the f-string so a later .format() call can fill it in.
    query = f"select distinct entity_id from ({unions}) as e" +" where knowledge_date < '{as_of_date}'"
    cohort_config = config.get('cohort_config', {})
    # Any user-provided cohort_config keys override these defaults.
    default_config = {'query': query, 'name': 'all_entities'}
    default_config.update(cohort_config)
    return default_config
def max_multiple(divisor, bound):
    """
    Find the largest multiple of *divisor* that does not exceed *bound*.

    :param divisor: positive integer.
    :param bound: positive integer.
    :return: the largest integer N such that N is divisible by divisor and
        N <= bound (0 when bound < divisor).
    """
    return (bound // divisor) * divisor
def _duplex(port_data):
    """Return duplex value for port.
    Args:
        port_data: Data dict related to the port (SNMP OID name -> value)
    Returns:
        duplex: Duplex value
            0) Unknown
            1) Half
            2) Full
            3) Half Auto
            4) Full Auto
    """
    # Initialize key variables
    duplex = 0
    value = ''
    # Vendor-specific duplex OIDs, checked in priority order.
    statuses = ('swPortDuplexStatus',
                'dot3StatsDuplexStatus',
                'portDuplex')
    def get_duplex_value(status, val):
        """Return duplex value based on port status.
        Args:
            status: The OID name the value came from
            val: The raw value read for that OID
        Returns:
            value: Duplex value (0 unknown, 1 half, 2 full)
        """
        # Each OID family encodes half/full with different raw numbers.
        cases = {
            'swPortDuplexStatus': 2 if val == 1 else 1,
            'dot3StatsDuplexStatus': 1 if val == 2 else (2 if val == 3 else 0),
            'portDuplex': 1 if val == 1 else (2 if val == 2 else 0),
        }
        return cases[status]
    # Use the first recognized OID present in the port data.
    for status in statuses:
        if status in port_data:
            value = port_data[status]
            duplex = get_duplex_value(status, value)
            break
    # Process c2900PortDuplexState
    # The Cisco 3500XL is known to report incorrect duplex values.
    # Obsolete device, doesn't make sense supporting it.
    if not duplex and 'c2900PortLinkbeatStatus' in port_data:
        status_link = port_data['c2900PortLinkbeatStatus']
        status_duplex = port_data['c2900PortDuplexStatus']
        if status_link == 3:
            # If no link beats (Not AutoNegotiate)
            if status_duplex == 1:
                duplex = 2
            elif status_duplex == 2:
                duplex = 1
        else:
            # If link beats (AutoNegotiate)
            if status_duplex == 1:
                duplex = 4
            elif status_duplex == 2:
                duplex = 3
    # Return
    return duplex
def substring_set(w, l):
    """Return all unique length-*l* substrings of word *w*.

    Parameters
    ----------
    w: list
        List of characters representing a word (a plain string also works)
    l: int
        Length of substrings to generate

    Returns
    -------
    set
        The dot-joined substrings of the specified length
    """
    return {'.'.join(w[start:start + l]) for start in range(len(w) - l + 1)}
def checkSignal(frequency):
    """Map a detected audio frequency (Hz) to a 2-bit symbol.

    Returns 0-3 for the four data bands, -1 for the start-signal band,
    -2 for frequencies at or below 18200 Hz, and -3 for anything that
    falls between bands.  (Removed the unused ``global signal_gap``
    declaration and corrected the stale docstring, which described only
    two bands.)
    """
    # (low, high, symbol) bands, inclusive on both ends
    bands = (
        (18300, 18400, 0),   # bit 00
        (18700, 18800, 1),   # bit 01
        (19100, 19200, 2),   # bit 10
        (19500, 19600, 3),   # bit 11
        (18900, 19000, -1),  # start signal
    )
    for low, high, symbol in bands:
        if low <= frequency <= high:
            return symbol
    if frequency <= 18200:
        return -2
    return -3
def rest_api_parameters(in_args, prefix='', out_dict=None):
    """Flatten a nested dict/list structure into a single-level dict whose
    key names encode the structure.

    Example usage:
    >>> rest_api_parameters({'courses':[{'id':1,'name': 'course1'}]})
    {'courses[0][id]':1,
     'courses[0][name]':'course1'}
    """
    if out_dict is None:
        out_dict = {}
    # Leaf value: store it under the accumulated key and stop recursing.
    if type(in_args) not in (list, dict):
        out_dict[prefix] = in_args
        return out_dict
    # Top-level keys appear bare; nested keys get bracketed.
    key_template = prefix + ('{0}' if prefix == '' else '[{0}]')
    if type(in_args) == list:
        pairs = enumerate(in_args)
    else:
        pairs = in_args.items()
    for key, value in pairs:
        rest_api_parameters(value, key_template.format(key), out_dict)
    return out_dict
def get_distance(coords1, coords2):
    """Manhattan distance between two coordinate pairs.

    Args:
        coords1 ([int, int]): The first set of coordinates
        coords2 ([int, int]): The second set of coordinates
    """
    dx = coords1[0] - coords2[0]
    dy = coords1[1] - coords2[1]
    return abs(dx) + abs(dy)
def minmax(n, min_val, max_val):
    """Clamp *n* into the inclusive range [min_val, max_val]."""
    assert min_val <= max_val
    if n < min_val:
        return min_val
    if n > max_val:
        return max_val
    return n
def items_produced_in_n_days(n, machines):
    """
    In n days, a machine finishes n // days_per_item items; the total is the
    sum over all machines (each entry of *machines* is its days-per-item).
    """
    total = 0
    for days_per_item in machines:
        total += n // days_per_item
    return total
def apply_fstring(swagger_obj, vars, k=None):
    """Recursively format every f-string-style value in a swagger object.

    Strings are formatted with ``str.format(**vars)``; lists and dicts are
    walked and updated in place.  Integer dict keys (HTTP response codes)
    are converted to strings.  Returns the (possibly new) formatted object.

    Bug fix: list elements are now replaced with their formatted result —
    previously strings inside lists were formatted but the result was
    discarded, leaving the placeholders in place.
    """
    if isinstance(swagger_obj, str):
        return swagger_obj.format(**vars)
    elif isinstance(swagger_obj, list):
        for i, item in enumerate(swagger_obj):
            # store the result back so formatted strings inside lists stick
            swagger_obj[i] = apply_fstring(item, vars)
    elif isinstance(swagger_obj, dict):
        for k, v in swagger_obj.copy().items():
            new_v = apply_fstring(v, vars)
            if isinstance(k, int):
                # used to convert integer codes (from the responses)
                del swagger_obj[k]
                k = str(k)
            swagger_obj[k] = new_v
    return swagger_obj
def parser_module_name(parser_fname):
    """
    Generate the dotted module path for the given parser file name.

    The ``.py`` extension (when present) is dropped so the result is a
    valid module path, matching the documented example — previously the
    extension was kept, producing 'bot.parsers.some_parser.py'.

    >>> parser_module_name('some_parser.py')
    'bot.parsers.some_parser'
    """
    module = parser_fname[:-3] if parser_fname.endswith('.py') else parser_fname
    return ".".join(["bot", "parsers", module])
def format_image_size(size):
    """Formats the given image size to a two-element tuple.

    A valid image size can be a non-negative integer, indicating both the
    height and the width, OR a one/two-element list or tuple of non-negative
    integers.

    Args:
        size: The input size to format.
    Returns:
        A two-element tuple indicating the height and width, respectively.
    Raises:
        ValueError: If the input size is invalid.
    """
    if not isinstance(size, (int, tuple, list)):
        raise ValueError(f'Input size must be an integer, a tuple, or a list, '
                         f'but `{type(size)}` received!')
    if isinstance(size, int):
        size = (size, size)
    elif len(size) == 1:
        size = (size[0], size[0])
    if not len(size) == 2:
        raise ValueError(f'Input size is expected to have two numbers at '
                         f'most, but `{len(size)}` numbers received!')
    # Validate both dimensions regardless of the input form (the integer
    # shortcut previously bypassed these checks, letting negatives through).
    if not isinstance(size[0], int) or size[0] < 0:
        raise ValueError(f'The height is expected to be a non-negative '
                         f'integer, but `{size[0]}` received!')
    if not isinstance(size[1], int) or size[1] < 0:
        raise ValueError(f'The width is expected to be a non-negative '
                         f'integer, but `{size[1]}` received!')
    return tuple(size)
def cwE(nc, w, adj, *args):
    """
    Create a column-width profile for the cmds.rowColumnLayout command.

    :param nc: number of columns
    :param w: total width of the whole layout
    :param adj: adjustment subtracted from the width so columns fit the layout
    :return: list of (columnID, width) pairs, columnID starting at 1
    """
    width = (w - adj) / nc
    return [(column + 1, width) for column in range(nc)]
def problem_4_6(node1, node2):
    """First common ancestor of two nodes in a binary tree whose nodes carry
    ``parent`` pointers; no extra data structures are used (NOTE: not
    necessarily a binary search tree).

    For each ancestor of node1 (starting with node1 itself), walk node2's
    ancestor chain looking for a match.  Worst case O(d1 * d2) where d1/d2
    are the nodes' depths.  An alternative using two linked lists of
    ancestors would be linear but needs extra space.
    """
    outer = node1
    while outer != None:
        inner = node2
        while inner != None:
            if inner == outer:
                return outer
            inner = inner.parent
        outer = outer.parent
    return None
def build_idef_regexp( curr_idef ):
    """ build regexp quering collection """
    # depth of the idef: each '-' descends one level (e.g. 'A-1-B' is depth 2)
    level_num= curr_idef.count('-')
    # build regexp for the given idef plus it's context (siblings and full parental branch)
    if level_num > 0: # deeper than 'a'
        # siblings of curr_idef: the parent prefix plus one alphanumeric segment
        idef_srch= curr_idef.rsplit('-', 1)[0]
        lookup_idef= r'^%s\-([A-Z]|\d)+$' % idef_srch
        curr_idef= idef_srch
        level= 1
        # walk up the ancestry, adding one alternative per remaining level
        while level < level_num:
            idef_srch= curr_idef.rsplit('-', 1)[0]
            lookup_idef += r'|^%s\-([A-Z]|\d)+$' % idef_srch
            curr_idef= idef_srch
            level += 1
        # finally include every top-level idef as well
        lookup_idef += r'|^([A-Z]|\d)+$'
    else: # simply query the highest level
        lookup_idef= r'^([A-Z]|\d)+$'
    return lookup_idef
def reverse_dict(mapping):
    """Return a dict mapping each value of *mapping* back to its key."""
    swapped_pairs = ((value, key) for key, value in mapping.items())
    return dict(swapped_pairs)
def is_prime(n):
    """
    Checks if n is a prime number.

    :param n: (int) the number to check.
    :return: (Boolean) True if n is prime, False otherwise.

    Bug fix: values below 2 (including 1, 0 and negatives) now return
    False — previously 1 was reported prime.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # test odd divisors up to sqrt(n)
    limit = n ** 0.5 + 1
    i = 3
    while i <= limit:
        if n % i == 0:
            return False
        i += 2
    return True
def atomZ_sel_str(pdg_var):
    """ Returns the ROOT sel string for cutting on Z using pdg codes.
    pdg_var: string representing the true-pdg variable in the TTree
    """
    return f'{pdg_var}/10000 % 1000'
def generate_primary_diagonal(space, i, j):
    """
    Collect all in-bounds cells on the diagonal extending from the top-left
    corner toward the bottom-right corner through (i, j).

    Returns a set of (row, col) tuples; empty when (i, j) is out of bounds.
    """
    n_rows = len(space)
    n_cols = len(space[0]) if space else 0
    if not (0 <= i < n_rows and 0 <= j < n_cols):
        return set()
    # step back to the diagonal's first cell, then sweep forward
    back = min(i, j)
    r, c = i - back, j - back
    cells = set()
    while r < n_rows and c < n_cols:
        cells.add((r, c))
        r += 1
        c += 1
    return cells
def slice_to_list(s):
    """
    Expand a slice object (with explicit start and stop) into the list of
    indices it denotes.  Used by some of the methods in message.py.
    """
    return list(range(s.start, s.stop, s.step or 1))
def exists_jpg_extension(files):
    """Check whether any file in *files* is a jpg image.

    Args:
        files: List of filenames
    Returns:
        True if any filename ends with the ".jpg" extension, otherwise False.
        (Previously any filename merely *containing* "jpg" matched, e.g.
        "jpg_notes.txt" was counted as an image.)
    """
    return any(fname.endswith('.jpg') for fname in files)
def moreFunc(y, z):
    """
    @description: basic multiplication function, returns y * z
    """
    product = y * z
    return product
def str2lines(s, length=80, joiner='\n'):
    """Convert a string into multiline text whose lines are *length* chars."""
    chunks = [s[start:start + length] for start in range(0, len(s), length)]
    return joiner.join(chunks)
def distance(model_result_summary_dict: dict, observations_dict: dict) -> float:
    """Absolute difference between the model result and the observation.

    Note that both inputs are dictionaries: the model summary lives under the
    'model_summary' key (as returned by summary_stats) and the observation
    under the 'observation' key.
    """
    model_value = model_result_summary_dict['model_summary']
    observed_value = observations_dict['observation']
    return abs(model_value - observed_value)
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django-core.core"
else:
return "django-core.contrib-%s" % name |
def get_kan_kafka_version(params):
    """Return the kafka docker image version built from *params*
    (requires keys minikan_version, scala_version, kafka_version)."""
    parts = (params["minikan_version"], params["scala_version"], params["kafka_version"])
    return "_".join(str(part) for part in parts)
def SetCounts(setList):
    """Take a list of sets and return a dict that maps elements in the sets
    to the number of sets they appear in as ints.
    """
    from collections import Counter
    # Counter.update with a set increments each member exactly once per set,
    # replacing the hand-rolled key-union + fromkeys + nested-loop version.
    counts = Counter()
    for cur_set in setList:
        counts.update(cur_set)
    return dict(counts)
def getIntersectionNode(headA, headB):
    """
    Return the node where two singly linked lists intersect, or None.

    Two-pointer technique: each pointer walks its own list and then switches
    to the other list's head; both pointers align after at most
    len(A) + len(B) steps, meeting at the intersection node (or both at None).

    Bug fix: nodes are compared by identity (``is``) rather than by ``val`` —
    value comparison could report a false intersection between distinct nodes
    holding equal values, and raised AttributeError on non-intersecting lists
    once both pointers reached None.

    :type headA, headB: ListNode
    :rtype: ListNode
    """
    if not headA or not headB:
        return None
    n_a, n_b = headA, headB
    while n_a is not n_b:
        n_a = headB if n_a is None else n_a.next
        n_b = headA if n_b is None else n_b.next
    return n_a
def speak_excitedly(message, num_exclamations=1, enthusiasm=False):
    """Return a message followed by some exclamation points, capitalized when
    *enthusiasm* is set."""
    result = message + '!' * num_exclamations
    return result.upper() if enthusiasm else result
def buildPacket(fileData, succesfullyOpened):
    """Build the response packet to be returned.

    Layout: magic number 0x497E (two bytes), type byte 2, status byte
    (1 when the file opened successfully, 0 otherwise), then — only on
    success — a 4-byte big-endian file length, followed by the file data
    (expected to be empty when the open failed).
    """
    status = 1 if succesfullyOpened else 0
    packet = bytearray((0x49, 0x7E, 2, status))
    if succesfullyOpened:
        # 4-byte big-endian file length
        packet += len(fileData).to_bytes(4, 'big')
    # file payload (appended unconditionally, as in the original protocol code)
    packet += bytes(fileData)
    return packet
def validate_annotations(x: str) -> str:
    """Validate Angry Tweets Annotations.

    Helper function. *x* is the string repr of a list of labels; if the
    annotators disagreed (two distinct labels), the annotation is replaced
    with 'skip', otherwise the single agreed label is returned.

    Bug fix: the disagreement branch previously used ``==`` (a no-op
    comparison) instead of ``=``, so disagreements returned the first
    label instead of 'skip'.

    Args:
        x (str): Annotation, e.g. "['positiv', 'negativ']".
    Returns:
        str: Adjusted annotation, single value.
    """
    # NOTE(review): eval on data is only acceptable for trusted input;
    # ast.literal_eval would be the safer choice.
    x = eval(x)
    if len(set(x)) == 2:
        x = ['skip']
    return x[0]
def frm_nrn_indx_to_2D_grd(nrn_indx, n_col_x, n_row_y):
    """Return the 2D grid coordinate (nrn_row_n_y, nrn_col_n_x) of a neuron,
    assuming row-major layout with n_col_x columns and n_row_y rows.

    Bug fixes: the row used true division (``/``), yielding a float index in
    Python 3, and the column was taken modulo the number of *rows* instead
    of the number of columns; divmod by n_col_x fixes both.
    """
    nrn_row_n_y, nrn_col_n_x = divmod(nrn_indx, n_col_x)
    return (nrn_row_n_y, nrn_col_n_x)
def runoff_routine(rf, rs, sf, ss, sftr, kff, kf, ks):
    """Runoff generation step.

    rf/rs recharge the fast store sf and the slow store ss; three runoff
    components are then drained in sequence:
      qff -- surface runoff: fast-store excess above threshold sftr, rate kff
      qf  -- fast subsurface runoff, rate kf
      qs  -- slow subsurface runoff, rate ks
    Returns (qff, qf, qs, sf, ss) with the updated store levels.
    """
    # fast store: add recharge, spill excess above the threshold, then drain
    sf = sf + rf
    qff = max(0, sf - sftr) / kff
    sf = sf - qff
    qf = sf / kf
    sf = sf - qf
    # slow store: add recharge, then drain
    ss = ss + rs
    qs = ss / ks
    ss = ss - qs
    return qff, qf, qs, sf, ss
def isstruct(ob):
    """ isstruct(ob)
    Returns whether the given object is an SSDF struct (i.e. carries a
    truthy ``__is_ssdf_struct__`` attribute).
    """
    return bool(getattr(ob, '__is_ssdf_struct__', False))
def find_col_by_key(info, key, value_prefix, value, default=None):
    """Given a list of dicts *info*, return the index of the first entry whose
    *key* equals value_prefix + value; *default* when absent or info is None."""
    target = value_prefix + value
    if info is not None:
        for index, entry in enumerate(info):
            if entry.get(key) == target:
                return index
    return default
def cal_avg_donation(donations):
    """Calculate the average of a list of donations; 0 for an empty list."""
    return sum(donations) / len(donations) if donations else 0
def convert_str_to_key_value(string, separators=(':', '=')):
    """
    Split a 'foo:bar' / 'foo=bar' style string into a (key, value) pair.

    :param string: in 'foo:bar' or 'foo=bar' format
    :param separators: candidate separators; when several occur in the
        string, the last one listed in *separators* that appears wins
    :return: (key, value) | (None, None) when no separator is present
    """
    chosen = ''
    for candidate in separators:
        if candidate in string:
            chosen = candidate
    if not chosen:
        return None, None
    parts = [piece.strip(' ') for piece in string.split(chosen)]
    return parts[0], parts[1]
def gsutil_rm_rf_step(url):
    """Returns a GCB step to recursively delete the object with given GCS url."""
    command = 'gsutil -m rm -rf %s || exit 0' % url
    return {
        'name': 'gcr.io/cloud-builders/gsutil',
        'entrypoint': 'sh',
        'args': ['-c', command],
    }
def join_uri(uri, *segments):
    """Append path segments to a URI.

    Example: join_uri("/a/b", "c", "d") -> "/a/b/c/d"
    """
    tail = "/".join(segments)
    if not tail:
        return uri
    return "/".join((uri.rstrip("/"), tail))
def fix_output_name(name: str):
    """Remove the first "/Identity:0" (and "/resize/ResizeBilinear:0")
    occurrence from a tensor's name, when present."""
    for marker in ("/Identity:0", "/resize/ResizeBilinear:0"):
        name = name.replace(marker, "", 1)
    return name
def increment_initial_values(XO, YO, ICT, IN):
    """
    Swap-and-scale step: the new XO is -10 * old YO and the new YO is
    -10 * old XO; X and Y mirror the new values, the counter ICT resets to 0
    and IN is incremented.  Returns (XO, YO, X, Y, ICT, IN).
    """
    XO, YO = -10.0 * YO, -10.0 * XO
    return (XO, YO, XO, YO, 0, IN + 1)
def remove_single_characters(text):
    """
    Remove any remaining single-character words (whitespace-split).
    :text: string
    :return: string
    """
    kept = [word for word in text.split() if len(word) > 1]
    return ' '.join(kept)
def flawed(A):
    """Flawed implementation of max(): the running maximum starts at 0, so
    empty or all-negative input yields 0 (the flaw is intentional)."""
    best = 0
    for value in A:
        best = max(best, value)
    return best
def get_column_names(max_element_size, num_extra_columns):
    """Generate the list of column names Pandas uses to parse mesh data.

    Args:
        max_element_size(int, required):
            the maximum element size among the mesh elements (e.g. 3 when the
            mesh only contains E3T elements, 4 when it also contains E4Q).
        num_extra_columns(int, required):
            number of extra columns after the element nodes and the material
    Returns:
        List of column names: 'row_type' followed by 'cmp0', 'cmp1', ...
    """
    total = max_element_size + num_extra_columns + 1  # +1 for the element material column
    return ['row_type'] + ['cmp{}'.format(i) for i in range(total)]
def _getspecfilename(line, path):
"""
Get the full filename including path from the line in the dat file
Parameters
----------
line : string
formated line from DAT file
example: 'IUE = hd029647_iue.fits'
path : string
path of the FITS file
Returns
-------
full_filename : str
full name of file including path
"""
eqpos = line.find("=")
tfile = line[eqpos + 2 :].rstrip()
return path + tfile |
def get_src_dst_weights(smp, src_idx, dst_idx):
    """ Returns a tuple of src_weight, dst_weight indicating the weighting for
    edge costs to node costs. Weights sum to 2, as they will later be divided by
    2 in from_local_bounds.

    NOTE(review): `smp` appears to be a subgraph-matching problem object with
    `use_monotone`, optionally `next_tmplt_idx`, and `assigned_tmplt_idxs`
    attributes -- confirm against the caller.
    """
    # List-valued indices: a singleton side takes the full weight.
    if isinstance(src_idx, list) or isinstance(dst_idx, list):
        if len(src_idx) == 1:
            return (2, 0)
        elif len(dst_idx) == 1:
            return (0, 2)
    if not smp.use_monotone:
        # If a "next template index" is designated and touches this edge,
        # that endpoint takes the full weight.
        if hasattr(smp, "next_tmplt_idx") and smp.next_tmplt_idx in [src_idx, dst_idx]:
            if src_idx == smp.next_tmplt_idx:
                return (2, 0)
            elif dst_idx == smp.next_tmplt_idx:
                return (0, 2)
        else:
            # Otherwise weight the unassigned endpoint; split evenly when
            # both or neither endpoint is already assigned.
            assigned_tmplt_idxs = smp.assigned_tmplt_idxs
            if src_idx in assigned_tmplt_idxs and dst_idx not in assigned_tmplt_idxs:
                return (0, 2)
            elif dst_idx in assigned_tmplt_idxs and src_idx not in assigned_tmplt_idxs:
                return (2, 0)
            else:
                return (1, 1)
    else:
        # Monotone mode: always split evenly.
        return (1, 1)
def entity_emiss_o(x, n_lbs, tp, exp_term=2):
    """
    The function that calculates the emission prior of entity labels to the non-entity label 'O'
    according to the diagonal values of the emission prior.

    Parameters
    ----------
    x: diagonal values (may be an array; the piecewise selection is computed
        with boolean masks so it vectorizes)
    n_lbs: number of entity labels (2e+1)
    tp: turning point between the two pieces
    exp_term: the exponential term that controls the slope of the function

    Returns
    -------
    non-diagonal emission priors
    """
    # separating piecewise function: boolean masks select the piece per element
    low = x < tp
    high = x >= tp
    # parameters for the first piece: a polynomial a*x^exp_term + b*x + 1
    a = (2 - n_lbs) / ((exp_term - 1) * tp ** exp_term - exp_term * tp ** (exp_term - 1))
    b = 1 - n_lbs
    # parameter for the second piece: a line through (tp, f(tp)) and (1, 0)
    f_tp = a * tp ** exp_term + b * tp + 1
    c = f_tp / (tp - 1)
    # piecewise result: masks are 0/1, so exactly one term contributes per element
    y = low * (a * x ** exp_term + b * x + 1) + high * (c * x - c)
    return y
def accuracy(true_positives, true_negatives, false_positives, false_negatives, description=None):
    """Returns the accuracy, calculated as:
    (true_positives+true_negatives)/(true_positives+false_positives+true_negatives+false_negatives)
    Returns 0.0 when the denominator is (near) zero.
    """
    tp, tn = float(true_positives), float(true_negatives)
    fp, fn = float(false_positives), float(false_negatives)
    total = tp + tn + fp + fn
    if total < 1e-15:
        return 0.0
    return (tp + tn) / total
def dekatrian_week(dek_day: int, dek_month: int) -> int:
    """Returns the Dekatrian week day from a Dekatrian date.

    Month 0 is the Achronian period (week day 0); otherwise the days cycle
    1..7.  Note the year is never needed, and the month only to detect an
    Achronian day.

    Args:
        dek_day (int): Day of the month.
        dek_month (int): Month of the year.
    Return:
        int: 0 = Achronian; 1 = first week day ... 7 = seventh.
    """
    if dek_month == 0:
        return 0
    return (dek_day - 1) % 7 + 1
def Get_Heading_Change(heading_last, heading_current):
    """
    Determine the signed change in heading, wrapped into [-180, 180).

    :param heading_last: float of previous heading
    :param heading_current: float of current heading
    :return: float of the difference in heading
    """
    delta = heading_current - heading_last
    return (delta + 180) % 360 - 180
def _chomp(string):
"""Rather than rstrip(), remove only the last newline and preserve purposeful whitespace"""
if len(string) and string[-1] == '\n':
string = string[:-1]
if len(string) and string[-1] == '\r':
string = string[:-1]
return string |
def euclid_exd(a, b):
    """
    Extended Euclidean algorithm: return a tuple (u, v, d) where d is the
    greatest common divisor of the integers a and b, and u, v satisfy
    d = a * u + b * v.

    Raises TypeError when either argument is not an int.
    """
    if not isinstance(a, int) or not isinstance(b, int):
        raise TypeError
    u, d = 1, a
    if b == 0:
        return (u, 0, d)
    v1, v3 = 0, b
    # iterate the remainder sequence, carrying the Bezout coefficient for a
    while v3 != 0:
        q, t3 = divmod(d, v3)
        t1 = u - q * v1
        u, d = v1, v3
        v1, v3 = t1, t3
    # recover v from d = a*u + b*v
    v = (d - a * u) // b
    return (u, v, d)
def is_arrayish(list_of_elements, expected_type=None) -> bool:
    """
    Check whether *list_of_elements* is a list/tuple whose elements share one
    type.  Without *expected_type*, the first element's type is the reference
    (and, as before, only the elements after the first are actually checked).
    """
    if not isinstance(list_of_elements, (list, tuple)):
        return False
    if not list_of_elements:
        return True
    reference = type(list_of_elements[0]) if expected_type is None else expected_type
    return all(isinstance(item, reference) for item in list_of_elements[1:])
def get_resolution(original_resolution):
    """Takes (H,W) and returns (precrop, crop): a small profile for images
    under 96x96 pixels, a large one otherwise."""
    area = original_resolution[0] * original_resolution[1]
    if area < 96 * 96:
        return (160, 128)
    return (512, 480)
def object_gatekeeper(obj, is_auth, ignore_standalone=False):
    """
    Decide whether *obj* may be shown: falsy objects are hidden,
    authenticated users see everything, and anonymous users see objects
    whose ``available_to_public`` attribute is truthy (a missing attribute
    means hidden).

    It's OK to use available_to_public here because the underlying logic is
    identical.
    """
    if not obj:
        return False
    if is_auth:
        return True
    # Narrowed from a bare ``except``: only a missing attribute should fall
    # through to "hidden"; other errors now propagate instead of being hidden.
    try:
        return obj.available_to_public
    except AttributeError:
        return False
def sanitize_order(order):
    """
    takes an array of values (intended to be some sort of order/indexing thing)
    and makes it start at zero and be sequential with no duplicates.
    gaps are eliminated, and ties are decided based on of appearance in list
    negative numbers are removed
    """
    import numpy as np
    order = np.array(order)
    # drop negatives, then shift so the smallest remaining value is 0
    order = order[order >= 0]
    order -= order.min()
    # first handle non-uniques by incrementing all values greater than each non-unique one
    uniq_order, uniq_indices, uniq_counts = np.unique(
        order, return_index=True, return_counts=True)
    for idx, num in enumerate(order):
        if idx not in uniq_indices:
            # a later duplicate: bump this value and everything >= it by one...
            order[order >= num] = order[order >= num] + 1
            # but remember to then decrement the first instance of this number
            to_dec = np.where(order == num+1)[0][0]
            order[to_dec] = order[to_dec] - 1
    # ok, now sort, then handle gaps:
    sorti = np.argsort(order)
    # use this as an indexer to go back to the order passed in
    un_sorti = np.argsort(sorti)
    sorted_order = order[sorti]
    # now handle gaps: shift everything above a gap down so values are sequential
    for idx, num in enumerate(sorted_order):
        if idx == 0:
            continue
        if sorted_order[idx] != sorted_order[idx-1] + 1:
            diff = (sorted_order[idx] - sorted_order[idx-1]) - 1
            assert diff > 0
            msk = sorted_order >= sorted_order[idx]
            sorted_order[msk] = sorted_order[msk] - diff
    # undo the sort so the result lines up with the (filtered) input order
    return sorted_order[un_sorti]
def match_func(fname, json_data):
    """
    Match functional (fMRI) images from a series' JSON sidecar.

    Returns (folder, suffix, attrs, metadata): folder is always "func";
    suffix is "sbref"/"bold" for fMRI series (None otherwise); attrs and
    metadata record the task for resting/task runs.
    """
    attrs, md = {}, {}
    suffix = None
    desc = json_data["SeriesDescription"].lower()
    if "fmri" in desc:
        suffix = "sbref" if "sbref" in desc else "bold"
        if "resting" in desc:
            attrs["task"] = "rest"
        elif "task" in desc:
            attrs["task"] = "task"
            md["TaskName"] = "task"
    return "func", suffix, attrs, md
def remove_file_extension(file_name):
    """e.g.: remove_file_extension("hi.jpg") == "hi"

    Returns *file_name* without its last extension; a name containing no '.'
    is returned unchanged (previously this raised ValueError via rindex).
    It does not mutate file_name (str is immutable anyway).

    :param file_name: <str>
    :return: the file_name without the extension
    """
    base, dot, _ = file_name.rpartition('.')
    return base if dot else file_name
def isNonNull(requestContext, seriesList):
    """
    Takes a metric or wild card seriesList and counts up how many
    non-null values are specified. This is useful for understanding
    which metrics have data at a given point in time (ie, to count
    which servers are alive).
    Example:
    .. code-block:: none
        &target=isNonNull(webapp.pages.*.views)
    Returns a seriesList where 1 is specified for non-null values, and
    0 is specified for null values.
    """
    def transform(v):
        # map None -> 0, anything else -> 1
        if v is None: return 0
        else: return 1
    for series in seriesList:
        series.name = "isNonNull(%s)" % (series.name)
        series.pathExpression = series.name
        # replace the series' datapoints in place: append the transformed
        # values, then delete the original leading values (series behaves
        # like a list of datapoints)
        values = [transform(v) for v in series]
        series.extend(values)
        del series[:len(values)]
    return seriesList
def fastmail_local_to_remote(folder):
    """
    Perform name translation between local and remote: INBOX maps to itself,
    every other folder is nested under INBOX.
    """
    return folder if folder == 'INBOX' else 'INBOX.' + folder
def indent_text(text: str, level: int = 0) -> str:
    """Indent each line of ``text`` by ``level`` spaces."""
    pad = " " * level
    return "\n".join(pad + line for line in text.split("\n"))
def assumed_decimal_point(num_less_than_one, digits=7):
    """ Return a string with DIGITS of precision, with the leading "0." of
    the decimal representation removed.  Ignores sign.
    """
    formatted = format(abs(num_less_than_one), '.{}f'.format(digits))
    return formatted[2:]
def camelcase(s: str) -> str:
    """Convert snake case to camel case.

    Example:
        >>> camelcase("camel_case")
        'camelCase'
    """
    first, *rest = s.split("_")
    return first + "".join(word.title() for word in rest)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False |
def get_prices_of_products_in_discounted_categories(order, discounted_categories):
    """Get prices of variants belonging to the discounted categories.
    Product must be assigned directly to the discounted category, assigning
    product to child category won't work.
    """
    # If there's no discounted collections,
    # it means that all of them are discounted
    # NOTE(review): despite the comment above, an empty *discounted_categories*
    # makes this return an empty list -- confirm intent against the caller.
    line_prices = []
    if discounted_categories:
        discounted_categories = set(discounted_categories)
        for line in order:
            # skip order lines without a product variant
            if not line.variant:
                continue
            product_category = line.variant.product.category
            if product_category in discounted_categories:
                # one price entry per unit in the line
                line_prices.extend([line.unit_price_gross] * line.quantity)
    return line_prices
def hr_size(num, suffix="B") -> str:
    """
    Human-readable data size
    From https://stackoverflow.com/a/1094933
    :param num: number of bytes
    :param suffix: Optional size specifier
    :return: Formatted string
    """
    for prefix in " KMGTPEZ":
        if abs(num) < 1024.0:
            # the leading " " entry means "no prefix" for sub-KiB sizes
            return "%3.1f%s%s" % (num, prefix.strip(), suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, "Y", suffix)
def get_sides_doubled(present):
    """Calculate doubled length of each side of the present."""
    return [2 * side for side in present]
def char_to_bool(letter):
    """Transform character (J/N, any case) to Bool; raise ValueError otherwise."""
    upper = letter.upper()
    if upper == 'J':
        return True
    if upper == 'N':
        return False
    raise ValueError('Invalid character, only J or N allowed.')
def fahrenheit2kelvin(F):
    """
    Convert Fahrenheit to Kelvin
    :param F: Temperature in Fahrenheit
    :return: Temperature in Kelvin
    """
    rankine = F + 459.67
    return 5.0 / 9.0 * rankine
def overlapping_in_bed(fname, r_chr, r_start, r_stop):
    """
    Get from a bed file all intervals that overlap the region defined by
    r_chr, r_start, r_stop, clipped to that region.  Comment ('#') and
    'track' lines are skipped.

    Fix: the file is now opened with a context manager so the handle is
    closed deterministically (the original leaked it).
    """
    rval = []
    with open(fname) as handle:
        for line in handle:
            if line.startswith(("#", "track")):
                continue
            fields = line.split()
            chrom, start, stop = fields[0], int(fields[1]), int(fields[2])
            # overlap test kept exactly as before
            if chrom == r_chr and start < r_stop and stop >= r_start:
                rval.append((chrom, max(start, r_start), min(stop, r_stop)))
    return rval
def _bytes_to_int(byte):
"""Takes some Bytes and returns an Integer."""
return int.from_bytes(byte, 'little') |
def CustomSearch(start_token, func, end_func=None, distance=None,
                 reverse=False):
    """Returns the first token where func is True within distance of this token.

    Args:
      start_token: The token to start searching from
      func: The function to call to test a token for applicability
      end_func: The function to call to test a token to determine whether to
        abort the search.
      distance: The number of tokens to look through before failing search.
        Must be positive.  If unspecified, will search until the end of the
        token chain.
      reverse: When true, search the tokens before this one instead of the
        tokens after it.

    Returns:
      The first token matching func within distance of this token, or None if
      no such token is found.
    """
    # The forward and backward searches were duplicated verbatim except for
    # the link attribute being followed; collapse them into a single loop.
    link = 'previous' if reverse else 'next'
    token = start_token
    while token and (distance is None or distance > 0):
        candidate = getattr(token, link)
        if candidate:
            if func(candidate):
                return candidate
            if end_func and end_func(candidate):
                return None
        # Advance even when candidate is None so the loop terminates at the
        # end of the chain; distance is consumed per step, matching the
        # original behavior.
        token = candidate
        if distance is not None:
            distance -= 1
    return None
def is_native(obj, module):
    """
    Determine whether obj was defined in module.

    Returns True if obj was defined in this module, False if it was not,
    and None if it cannot be determined (e.g. primitive types without a
    __module__ attribute).
    """
    try:
        defining_module = obj.__module__
        return module.__name__ in defining_module
    except (AttributeError, TypeError):
        # obj has no __module__, or it is not a string we can search in.
        return None
def strip_headers(post):
    """Drop everything before the first blank line, returning the lowercased body."""
    _headers, separator, body = post.partition('\n\n')
    if separator:
        return body.lower()
    # No blank line found: unexpected inner structure, be conservative
    # and keep the whole post.
    return post.lower()
def _hill_valley_test(obj, x, xn, Nt):
"""
Test if a point `x` and its neighbor `xn` belongs to the same niche based on a Hill-Valley test approach
Parameters
----------
obj : callable
Objective function.
x : array_like, shape (n,)
Point.
xn : array_like, shape (n,)
Neighboring point.
Nt : int
Number of test points.
Returns
-------
test : bool
True if `x` and `xn` belongs to the same niche, otherwise false.
"""
fmax = max(obj(x), obj(xn))
for i in range(1, Nt + 1):
xt = xn + i / (Nt + 1) * (x - xn)
if fmax < obj(xt):
return False
return True |
def calc_price(given_price):
    """
    Calculate the price for limit orders.

    Currently a pass-through: the given price is returned unchanged.
    (The previous implementation branched on ``given_price == None`` but
    both branches produced the same value, so the dead branch was removed.)

    :param given_price: the requested price, or None if not given
    :return: calculated limit price
    """
    return given_price
def local_cmp(a, b):
    """
    Count how many keys have differing values between two dicts; the key
    sets are assumed to be identical.

    :param a: dict 1
    :param b: dict 2
    :return: number of keys whose values differ
    """
    return sum(1 for key in a if a[key] != b[key])
def text_get_line(text, predicate):
    """Return the first line of `text` matching `predicate`, or "" if none does."""
    return next((line for line in text.split("\n") if predicate(line)), "")
def noam_schedule(step, warmup_step=4000):
    """Noam learning-rate schedule from the original Transformer paper:
    linear warmup followed by inverse-square-root decay."""
    if step > warmup_step:
        # Decay phase: scale falls off as 1/sqrt(step).
        return (warmup_step ** 0.5) * (step ** -0.5)
    # Warmup phase: scale grows linearly to 1.0 at warmup_step.
    return step / warmup_step
def filter_version(tag: str) -> str:
    """Strip the conventional leading "v" from a git version tag.

    Version tags are usually of the form f"v{semver_version_string}"; if the
    tag follows that convention the leading "v" is removed, otherwise the
    tag is returned unchanged (empty/falsy tags are returned as-is).

    Args:
        tag: The git tag as a string

    Returns:
        The tag transformed into a SemVer compatible string.
    """
    if tag and tag.startswith("v"):
        return tag[1:]
    return tag
def is_leap_year(year):
    """Return True if the passed year is a Gregorian leap year, False otherwise."""
    # Divisible by 4, except century years, which must be divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def data_tuple(json_dict):
    """
    Convert a single-entry JSON dict into an (int key, value) tuple.

    Assumes the dict is already in valid tree format; the first key must be
    parseable as an integer.
    """
    first_key = list(json_dict)[0]
    value = json_dict[first_key]
    return int(first_key), value
def get_channel_id(data: dict) -> str:
    """Extract the channel identifier from an event payload dict.

    Raises KeyError if the payload has no 'channel' entry.
    """
    return data['channel']
def round_up(rounded, divider):
    """Round `rounded` up to the nearest multiple of `divider`."""
    # Adding divider-1 before the floor division bumps any non-multiple
    # over the next boundary; exact multiples are left unchanged.
    bumped = int(rounded + divider - 1)
    return bumped // divider * divider
def height_implied_by_aspect_ratio(W, X, Y):
    """
    Utility function for calculating height (in pixels)
    which is implied by a width, x-range, and y-range.
    Simple ratios are used to maintain aspect ratio.

    Parameters
    ----------
    W: int
        width in pixels
    X: tuple(xmin, xmax)
        x-range in data units
    Y: tuple(ymin, ymax)
        y-range in data units

    Returns
    -------
    H: int
        height in pixels

    Example
    -------
    plot_width = 1000
    x_range = (0, 35)
    y_range = (0, 70)
    plot_height = height_implied_by_aspect_ratio(plot_width, x_range, y_range)
    """
    # Height/width must equal the data-space aspect ratio (y-span / x-span).
    return int((W * (Y[1] - Y[0])) / (X[1] - X[0]))
def create_widget_entry(email_to, email_from, email_to_total, email_from_total) -> dict:
    """
    Build a vertical bar-chart widget entry for a pair of email addresses.

    :param email_to: string
        email to address
    :param email_from: string
        email from address
    :param email_to_total: int
        total incidents relevant to the "to" address
    :param email_from_total: int
        total incidents relevant to the "from" address
    :return: dict
        the relevant bar table
    """
    def _bar(address, total, direction, color):
        # One bar per address; name/label carry the stringified address.
        return {
            "data": [total],
            "groups": None,
            "name": str(address),
            "label": f"{direction}: {str(address)}",
            "color": color,
        }

    return {
        "Type": 17,
        "ContentsFormat": "bar",
        "Contents": {
            "stats": [
                _bar(email_to, email_to_total, "To", "rgb(255, 23, 68)"),
                _bar(email_from, email_from_total, "From", "rgb(255, 144, 0)"),
            ],
            "params": {
                "layout": "vertical"
            },
        },
    }
def has(cls):
    """
    Check whether *cls* is a class with ``attrs`` attributes
    (i.e. carries a non-None ``__attrs_attrs__``).

    :param type cls: Class to introspect.
    :rtype: :class:`bool`
    """
    try:
        return cls.__attrs_attrs__ is not None
    except AttributeError:
        return False
def _redact_secret(data):
    """ Modify `data` in-place and replace keys named `secret`. """
    # Iterative depth-first walk over nested dicts; non-dict input yields an
    # empty stack and is returned unchanged.
    if isinstance(data, dict):
        stack = [data]
    else:
        stack = []
    while stack:
        current = stack.pop()
        if 'secret' in current:
            # Overwrite the secret value with a fixed placeholder.
            current['secret'] = '<redacted>'
        else:
            # Descend into nested dicts only when this level had no 'secret'
            # key. NOTE(review): a dict containing both a 'secret' key and
            # nested dicts never has those nested dicts scanned — confirm
            # this early-stop is intended.
            stack.extend(
                value
                for value in current.values()
                if isinstance(value, dict)
            )
    return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.