content
stringlengths 42
6.51k
|
|---|
def print_query_parameters(SOURCE, DESTINATION, D_TIME, MAX_TRANSFER, WALKING_FROM_SOURCE, variant, no_of_partitions=None,
                           weighting_scheme=None, partitioning_algorithm=None):
    """
    Prints the input parameters related to the shortest path query.

    Args:
        SOURCE (int): stop-id of the SOURCE stop
        DESTINATION (int/list): stop-id of the DESTINATION stop. For One-To-Many algorithms, this is a list.
        D_TIME (pandas.datetime): departure time
        MAX_TRANSFER (int): maximum transfer limit
        WALKING_FROM_SOURCE (int): 1 or 0. 1 means walking from SOURCE is allowed.
        variant (int): variant of the algorithm. 0 for normal version,
                       1 for range version,
                       2 for One-To-Many version,
                       3 for Hyper version.
                       NOTE(review): the partition details below are printed only for
                       variant == 4, which is not documented here -- confirm the
                       intended variant numbering against the callers.
        no_of_partitions: number of partitions the network has been divided into
        weighting_scheme: which weighting scheme has been used to generate partitions
        partitioning_algorithm: which algorithm has been used to generate partitions

    Returns:
        None
    """
    print("___________________Query Parameters__________________")
    # Network name is hard-coded; it is not derived from the query inputs.
    print("Network: Switzerland")
    print(f"SOURCE stop id: {SOURCE}")
    print(f"DESTINATION stop id: {DESTINATION}")
    print(f"Maximum Transfer allowed: {MAX_TRANSFER}")
    print(f"Is walking from SOURCE allowed ?: {WALKING_FROM_SOURCE}")
    # Range (1) and One-To-Many (2) variants run a whole-day profile query,
    # so the concrete departure time is not shown for them.
    if variant == 2 or variant == 1:
        print(f"Earliest departure time: 24 hour (Profile Query)")
    else:
        print(f"Earliest departure time: {D_TIME}")
    # Partition details only apply to the partitioned variant (4).
    if variant == 4:
        print(f"Number of partitions: {no_of_partitions}")
        print(f"Partitioning Algorithm used: {partitioning_algorithm}")
        print(f"Weighing scheme: {weighting_scheme}")
    print("_____________________________________________________")
    return None
|
def type_eq(cdm_column_type, submission_column_type):
    """Check whether a submission column type is acceptable for a CDM spec type.

    :param cdm_column_type: column type named in the CDM specification
    :param submission_column_type: column type observed in the submission
    :return: True when the submission type is an accepted match
    :raises Exception: for a CDM type not covered by the mapping
    """
    # Accepted submission types, keyed by CDM spec type.
    accepted = {
        'time': ('character varying',),
        'integer': ('int',),
        'character varying': ('str', 'unicode', 'object'),
        'text': ('str', 'unicode', 'object'),
        'string': ('str', 'unicode', 'object'),
        'date': ('str', 'unicode', 'datetime64[ns]'),
        'timestamp': ('str', 'unicode', 'datetime64[ns]'),
        'numeric': ('float',),
    }
    if cdm_column_type not in accepted:
        raise Exception('Unsupported CDM column type ' + cdm_column_type)
    return submission_column_type in accepted[cdm_column_type]
|
def _getSource(parent):
"""Finds the highest level CommandGroup of parent"""
try:
return parent._source
except AttributeError:
return parent
|
def extract_string(data, offset=0):
    """Decode the NUL-terminated ASCII string starting at ``offset`` in ``data``."""
    end = offset
    # Scan forward to the terminating zero byte.
    while data[end]:
        end += 1
    return data[offset:end].decode("ascii")
|
def evaluate(tokens) -> int:
    """Recursively evaluate a parsed expression list, strictly left to right
    (no operator precedence).

    ``tokens`` is either an int literal or a list alternating operands and
    operator strings, e.g. ``[2, '+', 3, '*', 4]`` evaluates as ``(2 + 3) * 4``.
    Operands may themselves be nested lists.

    Returns:
        The accumulated result (int, or float when '/' is used).

    Raises:
        KeyError: if an unsupported operator string is encountered.
    """
    # Dispatch table instead of eval() on a constructed string: eval would
    # execute arbitrary code if an operator token were attacker-controlled.
    ops = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
        '//': lambda a, b: a // b,
        '%': lambda a, b: a % b,
        '**': lambda a, b: a ** b,
    }
    # base case: a bare literal
    if type(tokens) == int:
        return tokens
    val = evaluate(tokens[0])
    op = ""
    for i, tok in enumerate(tokens[1:], 1):
        if i % 2:
            # odd positions hold operator symbols
            op = tok
        else:
            val = ops[op](val, evaluate(tok))
    return val
|
def _st(l):
"""Get the first element."""
return list(l)[0]
|
def GetParentProcess(child_process, ppid, process_pids):
    """Walk the PID table upward from ``ppid`` to the top-most same-named ancestor.

    Recurses until the found ancestor's own parent is a system PID (0 or 1).
    Returns None when the parent is missing or has a different process name
    (a different name means it is not a legitimate parent for tracking).
    """
    # Unknown PPID: nothing to resolve.
    if ppid not in process_pids:
        return None
    parent_process = process_pids[ppid]
    # A differently-named parent breaks the chain.
    if child_process['name'] != parent_process['name']:
        return None
    # Parent is already top-level (owned by a system PID): done.
    if parent_process['ppid'] in (0, 1):
        return parent_process
    # Otherwise keep climbing; fall back to this parent when the climb dead-ends.
    higher_parent = GetParentProcess(child_process, parent_process['ppid'], process_pids)
    return higher_parent if higher_parent is not None else parent_process
|
def get_offset_array_start_and_end(array_length, array_offset, arry_2_lngth=None):
    """Compute single-axis slice bounds in the context of a registration offset.

    Usage: array_start, array_end = get_offset_array_start_and_end(array_length, array_offset, arry_2_lngth)

    Args:
        array_length: image dimension (positive integer)
        array_offset: registration offset (signed integer)
        arry_2_lngth: length of the second array (positive integer), optional

    Returns:
        (array_start, array_end): first index and last index + 1 along the axis
    """
    start = max(array_offset, 0)
    if arry_2_lngth is None:
        # Negative offsets shorten the usable range from the right.
        end = min(array_length + array_offset, array_length)
    else:
        # Clip the second array's extent to this axis's length.
        end = min(arry_2_lngth + start, array_length)
    return start, end
|
def gen_genre_string(genres: list):
    """Return a pretty, comma-separated genre string from a list of genre names.

    :param genres: list of genre strings
    :return: the genres joined with ', '; '' for an empty list

    Fix: the previous implementation used ``rstrip(', ')``, which strips a
    *character class* (any trailing commas/spaces), so a genre whose own text
    ended in ',' or ' ' was silently truncated. ``str.join`` adds no trailing
    separator in the first place.
    """
    return ', '.join(genres)
|
def filter_by_condition(xs, ys, condition_function):
    """Select the elements of ``ys`` whose paired element in ``xs`` satisfies
    ``condition_function``.

    Parameters
    ----------
    xs: list[list[Any]]
    ys: list[Any]
    condition_function: function: list[Any] -> bool

    Returns
    -------
    list[Any]
    """
    assert len(xs) == len(ys)
    return [y for x, y in zip(xs, ys) if condition_function(x)]
|
def _upgrading(version, current_version):
"""
>>> _upgrading('0.9.2', '1.9.2')
False
>>> _upgrading('0.11.3', '0.11.2')
True
>>> _upgrading('0.10.2', '0.9.2')
True
>>> _upgrading('1.1.3', '1.1.4')
False
>>> _upgrading('1.1.1', '1.1.1')
False
>>> _upgrading('0.9.1000', '50.1.1')
False
>>> _upgrading('50.1.1', '0.9.1000')
True
"""
version_parts = version.split('.')
c_version_parts = current_version.split('.')
c_version_parts[2] = c_version_parts[2].split('-')[0]
if c_version_parts == version_parts:
return False
for i in range(3):
if int(version_parts[i]) > int(c_version_parts[i]):
return True
elif int(version_parts[i]) < int(c_version_parts[i]):
return False
|
def hash_object(object_):
    """Return a hash value for ``object_`` even when it does not define ``__hash__``.

    Parameters
    ----------
    object_ : `Any`
        The object to hash.

    Returns
    -------
    hash_value : `int`
    """
    try:
        return hash(object_)
    except (TypeError, NotImplementedError):
        # Fall back to the identity-based default hash.
        return object.__hash__(object_)
|
def _ip_sort(ip):
"""Ip sorting"""
idx = "001"
if ip == "127.0.0.1":
idx = "200"
if ip == "::1":
idx = "201"
elif "::" in ip:
idx = "100"
return "{0}___{1}".format(idx, ip)
|
def _reg2float(reg):
"""Converts 32-bit register value to floats in Python.
Parameters
----------
reg: int
A 32-bit register value read from the mailbox.
Returns
-------
float
A float number translated from the register value.
"""
if reg == 0:
return 0.0
sign = (reg & 0x80000000) >> 31 & 0x01
exp = ((reg & 0x7f800000) >> 23) - 127
if exp == 0:
man = (reg & 0x007fffff) / pow(2, 23)
else:
man = 1 + (reg & 0x007fffff) / pow(2, 23)
result = pow(2, exp) * man * ((sign * -2) + 1)
return float("{0:.2f}".format(result))
|
def question(text: str) -> str:
    """Prefix ``text`` with a question-mark emoji.

    Returns
    -------
    str
        The new message.
    """
    return f"\N{BLACK QUESTION MARK ORNAMENT} {text}"
|
def try_value_to_bool(value, strict_mode=True):
    """Try to convert ``value`` into a boolean.

    strict_mode is True:
        - only the exact strings 'True' and 'False' convert;
        - anything else is returned unchanged.
    strict_mode is False:
        - case-insensitive 'true'/'on'/'yes' convert to True;
        - case-insensitive 'false'/'off'/'no' convert to False;
        - all other values are returned unchanged.
    """
    if strict_mode:
        truthy, falsy = ('True',), ('False',)
        probe = value
    else:
        truthy, falsy = ('true', 'on', 'yes'), ('false', 'off', 'no')
        probe = str(value).lower()
    if probe in truthy:
        return True
    if probe in falsy:
        return False
    return value
|
def _find_directive(directives, directive_name):
"""Find a directive of type directive_name in directives
"""
if not directives or isinstance(directives, str) or len(directives) == 0:
return None
if directives[0] == directive_name:
return directives
matches = (_find_directive(line, directive_name) for line in directives)
return next((m for m in matches if m is not None), None)
|
def check_delim(argument,
                default = "\t"):
    """
    =================================================================================================
    check_delim(argument, default)
    Validates a (potential) field delimiter string and normalises it. Most files
    seen in practice are tab delimited, hence the default being tab.
    =================================================================================================
    Arguments:
    argument -> A string holding a (potential) delimiter, or None
    default -> A string determining the default delimiter
    =================================================================================================
    Returns: The delimiter, after checking that it is a valid delimiter.
    =================================================================================================
    """
    # Assert that the argument is in the list of accepted delimiters.
    # NOTE(review): assert is stripped under `python -O`; confirm callers do not
    # rely on this validation in optimised runs.
    assert argument in [None,':', '|', ';', '-', '\\', '/', '\\t', ',', "\t", "'\t'",
                        "':'", "'|'", "';'", "'-'", "'\\'", "'/'", "'\\t'" ,"','"], "The delimiter given is not accepted."
    # If the argument is None, then the argument is unset. Thus,
    # return the preset default (tab unless overridden).
    if argument == None:
        return default
    # If the argument is a properly formatted delimiter string, return it as-is.
    elif argument in [':', '|', ';', '-', '\\', '/', ',']:
        return argument
    # If the argument is an improperly formatted string (which happens when you pass
    # single-quote strings into the Windows command line), then strip the single
    # quotes and return the properly formatted string.
    elif argument in ["':'", "'|'", "';'", "'-'", "'\\'", "'/'" ,"','"]:
        return argument.strip("'")
    # Or if the argument is some form of the tab character (escaped, quoted, or
    # literal), return the real tab character.
    elif argument in ['\\t',"'\\t'", "\t","'\t'" ]:
        return "\t"
    # Unreachable given the assert above, but kept as a safe fallback.
    else:
        return default
|
def leapLanePenalty(l_, l):
    """Return the penalty for leaping from lane ``l_`` to lane ``l``.

    Staying in the same lane costs 0 (just the energy of the next hurdle);
    otherwise the penalty is the lane distance.
    """
    if l_ == l:
        return 0
    return abs(l_ - l)
|
def find_option_list(block, key, type, default, loud=False, layer_name=None, layer_idx=-1):
    """
    ----------
    Author: Damon Gwinn (gwinndr)
    ----------
    - Finds the option given by ``key`` and builds a list of values converted by ``type``
    - The raw value is split on ',' and each piece stripped; ``default`` is assumed to be a list
    - When the key is missing, ``default`` is used; with ``loud`` True the fallback is printed
    - ``layer_name`` (str) and ``layer_idx`` (int) add layer details to the loud printout
    ----------
    """
    if key in block:
        pieces = block[key].split(",")
        values = [piece.strip() for piece in pieces]
    else:
        values = default
        if loud:
            if layer_name is None:
                label = ""
            else:
                label = "%s at idx %d:" % (layer_name, layer_idx)
            print(label, "Using default:", default, "for key:", key)
    # Convert each entry unless the whole result is None.
    if values is not None:
        values = [type(v) for v in values]
    return values
|
def LabelCoordinateTransform(labels, old_wh, new_wh):
    """Rescale landmark coordinates from ``old_wh`` to ``new_wh`` image size.

    Args:
        labels: [[[x1, y1], [x2, y2], ...]] — a single-element list wrapping
            the list of landmarks.
        old_wh: (width, height) of the original image.
        new_wh: (width, height) of the target image.

    Returns:
        The rescaled landmarks in the same single-element-list wrapping, with
        each coordinate clamped to [0, new_dim - 1].
    """
    rescaled = []
    for point in labels[0]:
        new_x = min(max(0, point[0] / old_wh[0] * new_wh[0]), new_wh[0] - 1)
        new_y = min(max(0, point[1] / old_wh[1] * new_wh[1]), new_wh[1] - 1)
        rescaled.append([new_x, new_y])
    return [rescaled]
|
def default_port(scheme):
    """Return the default port for a given scheme ("https" or "http") as a string.

    :param scheme: URL scheme, either "https" or "http"
    :return: "443" for https, "80" for http
    :raises ValueError: for any other scheme

    Fix: the previous ``assert False`` is stripped under ``python -O``, making
    the function silently return None for unknown schemes; an explicit raise
    always fires.
    """
    if scheme == "https":
        return "443"
    elif scheme == "http":
        return "80"
    raise ValueError("no default port known for scheme: %r" % (scheme,))
|
def get_match(birthdays):
    """Return the date object of a birthday that occurs more than once in the
    list, or None when all birthdays are unique."""
    # Fast path: a set of unique items means no duplicate exists.
    if len(birthdays) == len(set(birthdays)):
        return None
    # Compare each birthday against every later one; return the first repeat.
    total = len(birthdays)
    for first in range(total):
        for second in range(first + 1, total):
            if birthdays[first] == birthdays[second]:
                return birthdays[first]
|
def as_str(string, encoding):
    """
    Return ``string`` as a unicode string, decoding with ``encoding`` when it
    is a ``bytes`` object; pass ``str`` through unchanged.

    Raises a ``TypeError`` when ``string`` is neither ``str`` nor ``bytes``.
    """
    if isinstance(string, str):
        return string
    if isinstance(string, bytes):
        return string.decode(encoding)
    raise TypeError("`as_str` argument must be `bytes` or `str`")
|
def LINE_CARD(chassis, card, mda, integrated=False, card_type=None):
    """Build line-card configuration blobs for a distributed or integrated
    deployment model.

    Note that it may be possible to omit configuring these explicitly.
    Returns a dict with a 'timos_line' slot string and a 'card_config' CLI blob.
    """
    slot_id = "A" if integrated else "1"
    effective_type = card_type if card_type else card
    timos = f"slot={slot_id} chassis={chassis} card={card} mda/1={mda}"
    config = f"""
/configure card 1 card-type {effective_type}
/configure card 1 mda 1 mda-type {mda}
"""
    return {"timos_line": timos, "card_config": config}
|
def GeBuildConfigAllBoards(ge_build_config):
    """Extract the board names from a decoded GE Build Config.

    Args:
        ge_build_config: dictionary containing the decoded GE configuration file.

    Returns:
        A list of board names as strings.
    """
    names = []
    for board in ge_build_config['boards']:
        names.append(board['name'])
    return names
|
def _lib_name(lib, version = "", static = False):
"""Constructs the name of a library on Linux.
Args:
lib: The name of the library, such as "hip"
version: The version of the library.
static: True the library is static or False if it is a shared object.
Returns:
The platform-specific name of the library.
"""
if static:
return "lib%s.a" % lib
else:
if version:
version = ".%s" % version
return "lib%s.so%s" % (lib, version)
|
def clean_line_endings(unicode_contents):
    """
    Normalise line endings for pandas: some fields embed bare "\\n" newlines
    while "\\r\\n" marks real rows. Delete every "\\n" and turn every "\\r"
    into "\\n", leaving Unix-style rows with no embedded newlines.
    """
    # Single C-level pass: '\n' -> deleted, '\r' -> '\n'. Because the two
    # substitutions touch disjoint characters, this equals the sequential
    # replace('\n','') + replace('\r','\n').
    table = {ord('\n'): None, ord('\r'): '\n'}
    return unicode_contents.translate(table)
|
def slice_at_axis(sl, axis):
    """
    Construct a tuple of slices that applies ``sl`` along one dimension.

    Parameters
    ----------
    sl : slice
        The slice for the given dimension.
    axis : int
        The axis to which `sl` is applied. All other dimensions are left
        "unsliced".

    Returns
    -------
    sl : tuple of slices
        A tuple with slices matching `shape` in length.

    Examples
    --------
    >>> slice_at_axis(slice(None, 3, -1), 1)
    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
    """
    leading = [slice(None)] * axis
    return tuple(leading) + (sl, Ellipsis)
|
def __cmr_granule_polygons(search_results):
"""Get the polygons for CMR returned granules"""
if 'feed' not in search_results or 'entry' not in search_results['feed']:
return []
granule_polygons = []
# for each CMR entry
for e in search_results['feed']['entry']:
# for each polygon
for polys in e['polygons']:
coords = [float(i) for i in polys[0].split()]
region = [{'lon':x,'lat':y} for y,x in zip(coords[::2],coords[1::2])]
granule_polygons.append(region)
# return granule polygons in sliderule region format
return granule_polygons
|
def capitalize_title(title):
    """
    Convert a paper title to title case (first letter of each word capitalized).

    :param title: str title string that needs title casing
    :return: str title string in title case
    """
    titled = title.title()
    return titled
|
def tamper(payload, **kwargs):
    """
    Append an HTTP header 'X-originating-IP' to bypass
    WAF Protection of Varnish Firewall

    Notes:
        Reference: http://h30499.www3.hp.com/t5/Fortify-Application-Security/Bypassing-web-application-firewalls-using-HTTP-headers/ba-p/6418366

    Examples:
        >> X-forwarded-for: TARGET_CACHESERVER_IP (184.189.250.X)
        >> X-remote-IP: TARGET_PROXY_IP (184.189.250.X)
        >> X-originating-IP: TARGET_LOCAL_IP (127.0.0.1)
        >> x-remote-addr: TARGET_INTERNALUSER_IP (192.168.1.X)
        >> X-remote-IP: * or %00 or %0A
    """
    # Mutate the caller-supplied headers dict in place (sqlmap tamper protocol);
    # the payload itself is returned untouched.
    kwargs.get("headers", {})["X-originating-IP"] = "127.0.0.1"
    return payload
|
def split(s):
    """Split a string into consecutive chunks of 5 characters (the final chunk
    may be shorter).

    Adapted from:
    http://code.activestate.com/recipes/496784-split-string-into-n-size-pieces/
    """
    return [s[start:start + 5] for start in range(0, len(s), 5)]
|
def isPrimePair(s1, s2):
    """
    Check whether two (simple) moves are primes of each other: the longer move
    must equal the shorter plus a trailing apostrophe.
    """
    if len(s1) >= len(s2):
        longer, shorter = s1, s2
    else:
        longer, shorter = s2, s1
    return (
        len(longer) - len(shorter) == 1
        and longer[:len(shorter)] == shorter
        and longer[-1] == "'"
    )
|
def get_pypy_package_name_without_version(pypy_package: str) -> str:
    """Strip any version constraint from a requirement string.

    >>> get_pypy_package_name_without_version('wheel==0.32.3')
    'wheel'
    >>> get_pypy_package_name_without_version('wheel>=0.32.3')
    'wheel'
    >>> get_pypy_package_name_without_version('wheel<0.32.3')
    'wheel'
    >>> get_pypy_package_name_without_version('wheel>0.32.3')
    'wheel'
    """
    name = pypy_package
    # Cut at the first occurrence of any comparison delimiter.
    for delimiter in ('=', '>', '<'):
        name = name.partition(delimiter)[0]
    return name
|
def _sanitize_str_to_list(dim):
"""Make dim to list if string, pass if None else raise ValueError."""
if isinstance(dim, str):
dim = [dim]
elif isinstance(dim, list) or dim is None:
dim = dim
else:
raise ValueError(
f"Expected `dim` as `str`, `list` or None, found {dim} as type {type(dim)}."
)
return dim
|
def traverse(obj, path=None, callback=None):
    """
    Traverse an arbitrary Python object structure (limited to JSON data
    types), rebuilding dicts and lists recursively. A None value (including a
    None leaf) is replaced by the placeholder string '-----------------'.
    ``callback`` and ``path`` are accepted for interface compatibility; the
    path is threaded through the recursion but the callback is not invoked.
    """
    if path is None:
        path = []
    if isinstance(obj, dict):
        rebuilt = {key: traverse(child, path + [key], callback) for key, child in obj.items()}
    elif isinstance(obj, list):
        rebuilt = [traverse(item, path + [[]], callback) for item in obj]
    else:
        rebuilt = obj
    return '-----------------' if rebuilt is None else rebuilt
|
def splitLineAndComment(line):
    """
    Return a ``(line, comment)`` tuple based on a '//' comment delimiter.
    Either part may be ''. Whitespace is not stripped, and only the first
    two slashes act as the delimiter.
    """
    code, delimiter, comment = line.partition('//')
    if not delimiter:
        return (code, '')
    return (code, comment)
|
def canonical_line_parser(line, **kwargs):
    """
    Parse a canonical dataset line of the form
    ``<three_letter_dataset_short_name>,<molecule_ID_name>,<smiles>``.
    """
    dataset, molecule, smiles = line.split(',')
    return dataset, molecule, smiles.strip('\n')
|
def reaction_identifier(rct_sids, prd_sids):
    """Build a SMIRKS-style reaction ID from reactant and product species IDs."""
    return '{}>>{}'.format('.'.join(rct_sids), '.'.join(prd_sids))
|
def problem_dimensions_setter(dimensions):
    """Translate a problem-dimension string ('2D' or '3D') to an integer.

    Args:
        dimensions (string): '2D' for a two-dimensional problem, '3D' for a
            three-dimensional problem.

    Raises:
        Exception: when the string is not exactly '2D' or '3D'.
        Exception: when the input is not a string.

    Returns:
        integer: the problem dimensions (2 or 3).
    """
    if not isinstance(dimensions, str):
        raise Exception('Input must be a string')
    mapping = {'2D': 2, '3D': 3}
    if dimensions not in mapping:
        raise Exception('Please input 2D for a two-dimensional problem and 3D for a three-dimensional problem')
    return mapping[dimensions]
|
def _cubic(r):
"""Cubic RBF: :math: `f(x) = x^3`"""
return r*r*r
|
def search_closetag(line, pos=0, tag="tu"):
    """Return the index of the closing ``</tag>`` in ``line`` at or after
    ``pos``, or -1 when absent."""
    closing = f"</{tag}>".encode()
    # bytes.find already returns -1 on a miss, matching the old try/except.
    return line.find(closing, pos)
|
def timecode_from_frame(frame_duration, fps=24.0):
    """
    Return the timecode corresponding to the given frame count at ``fps``.

    :param frame_duration: int number of frames.
    :param fps: float frames per second.
    :returns: string non-drop-frame timecode "HH:MM:SS:FF".
    """
    # Whole-clip duration in seconds; the fractional part yields the frames field.
    total_seconds = frame_duration / fps
    frames = int(round((total_seconds - int(total_seconds)) * fps))
    # Whole-clip minutes; fractional part yields the seconds field.
    total_minutes = int(total_seconds) / 60
    secs = int(round((total_minutes - int(total_minutes)) * 60))
    # Whole-clip hours; fractional part yields the minutes field.
    total_hours = int(total_minutes) / 60
    mins = int(round((total_hours - int(total_hours)) * 60))
    hrs = int(total_hours)
    return "%02d:%02d:%02d:%02d" % (hrs, mins, secs, frames)
|
def make_readable(res):
    """
    Strip the namespace prefix (everything up to the last '#') from each URI
    in the rows of a SPARQL query result.
    """
    readable = []
    for row in res:
        readable.append([uri.rsplit("#", 1)[-1] for uri in row])
    return readable
|
def split_pmid_and_doi(ids):
    """Partition a list of identifiers: values castable to int are assumed to
    be PMIDs, everything else is assumed to be a DOI."""
    pmids = []
    dois = []
    for identifier in ids:
        try:
            pmids.append(int(identifier))
        except ValueError:
            dois.append(identifier)
    return pmids, dois
|
def lowercase(text):
    """Return ``text`` converted entirely to lowercase."""
    lowered = text.lower()
    return lowered
|
def remove_similar_lists(lst, lengths_lst, medium_threshold = 200):
    """Remove likely assembly errors near repeats by deconvoluting medium-length contigs.

    Short contigs have already been filtered out, but remaining medium-length
    ones still cause problems with the graph tracing.

    Args:
        lst: list of paths, each a list of node ids; a path's last node is its
            end point.
        lengths_lst: list parallel to ``lst`` holding each path's node lengths.
        medium_threshold: node lengths at or below this value are masked out
            before paths are compared for duplication.

    Returns:
        list: the paths from ``lst``, with paths that share both an end node and
        a masked-length signature reduced to one representative each.
    """
    # masked_lengths_list parallels lengths_lst; lengths at or below the
    # threshold are dropped so duplicate detection ignores short nodes.
    deduplicated = [] # recipient structure
    masked_lengths_list = []
    for lengths in lengths_lst:
        masked_lengths = [x for x in lengths if x > medium_threshold ]
        masked_lengths_list.append(masked_lengths)
    # now, identify those lists sharing final nodes.
    path_ends = set([x[-1] for x in lst]) # final nodes
    for n in path_ends:
        # here are all the paths sharing this end point
        sublist_nodes = [] # can't use a list comprehension because the indexes are needed
        sublist_lengths = []
        for i, l in enumerate(lst):
            if l[-1] == n:
                sublist_nodes.append(l)
                sublist_lengths.append(masked_lengths_list[i])
        # Within this sublist, make sets of the unique paths (these should all
        # be distinct) and of the unique masked-length signatures (these can
        # contain duplicates). If the number of unique signatures matches the
        # number of paths, no duplication exists and all paths are kept.
        # NOTE(review): uniq_paths_to_end is computed but never used.
        uniq_paths_to_end = set(tuple(x) for x in sublist_nodes)
        uniq_lengths_of_paths_to_end = set(tuple(x) for x in sublist_lengths)
        if len(uniq_lengths_of_paths_to_end) != len(sublist_nodes):
            # We know duplicate paths exist, but not how many. There could be
            # two duplicates plus another distinct path to the node, so we go
            # unique signature by unique signature.
            for uniq_lengths in uniq_lengths_of_paths_to_end:
                # For each unique length signature, return exactly one
                # representative path.
                # This flag marks whether the representative was found yet.
                # (Could be refactored with a well-placed "break".)
                selected_representative = False
                for subpath, sublengths in zip(sublist_nodes, sublist_lengths):
                    # On the first path matching this signature, keep it and
                    # ignore the rest.
                    if tuple(sublengths) == uniq_lengths and not selected_representative:
                        deduplicated.append(subpath)
                        selected_representative = True
        else:
            deduplicated.extend(sublist_nodes)
    return deduplicated
|
def find_content_children(g, s):
    """
    Greedy cookie assignment (LeetCode 455): count how many children with
    greed factors ``g`` can be satisfied by cookies of sizes ``s``.

    :type g: List[int]
    :type s: List[int]
    :rtype: int
    """
    children = sorted(g)
    satisfied = 0
    # Hand out cookies smallest-first; each cookie satisfies the neediest
    # remaining child it can cover.
    for cookie in sorted(s):
        if satisfied < len(children) and children[satisfied] <= cookie:
            satisfied += 1
    return satisfied
|
def Apriori_gen(Itemset, lenght):
    """Generate candidate (k+1)-itemsets from the k-itemsets in ``Itemset``
    (the Apriori join stage; see README "Join Stage").

    Itemsets are represented as strings of single-character items kept in
    dictionary order; ``lenght`` [sic] is the number of itemsets to consider.
    """
    canditate = []
    # NOTE(review): canditate_index is assigned but never used.
    canditate_index = 0
    for i in range (0,lenght):
        element = str(Itemset[i])
        for j in range (i+1,lenght):
            element1 = str(Itemset[j])
            # Join two k-itemsets only when they share the same (k-1)-item prefix.
            if element[0:(len(element)-1)] == element1[0:(len(element1)-1)]:
                unionset = element[0:(len(element)-1)]+element1[len(element1)-1]+element[len(element)-1] #Combine (k-1)-Itemset to k-Itemset
                unionset = ''.join(sorted(unionset)) #Sort itemset by dict order
                canditate.append(unionset)
    return canditate
|
def levels_for_params(context, param_dict):
    """
    Inverse of the parameter-selection function: given a context and its
    parameters, look up the expected safe multiplicative depth.

    Returns 0 for contexts whose depth is not known.
    """
    if context == "HElib_Fp":
        return param_dict["Levels"] - 4
    if context == "SEAL":
        ring_dim = param_dict["N"]
        seal_levels = {2048: 1, 4096: 10}
        if ring_dim in seal_levels:
            return seal_levels[ring_dim]
        raise RuntimeError("Unrecognized value of N parameter")
    # Levels not known for this context.
    return 0
|
def _mergepeaks(a, b):
"""merge 2 peaks to 1 peak information
a, b are peak elements returned by peakdetect_simpleedge, consists of
((pvalue, pidx), (sidx, eidx)),
pvalue is value at pidx
pidx is positive peak value
sidx is the start index of the peak
eidx is the end index of the peak, y[sidx:eidx] takes whole curve
including the peak
return value is a tuple of peak element of
((max(a.pvalue, b.pvalue),
[index of new peak]),
(a.sidx, b.eidx))
"""
return (max(a[0], b[0]), (a[1][0], b[1][1]))
|
def sort_genes(gene_dict, key):
    """Return the (gene, record) items of ``gene_dict`` ordered by
    ``record[key]`` in descending order."""
    ranked = sorted(
        gene_dict.items(),
        key=lambda item: item[1][key],
        reverse=True,
    )
    return ranked
|
def FindPosition(point, points):
    """Locate ``point`` within the sorted vector ``points``.

    Returns -1 when point precedes points[0], the interval index i when
    points[i] <= point < points[i+1], or len(points) when point reaches or
    exceeds the last entry.
    """
    if point < points[0]:
        return -1
    for index, upper in enumerate(points[1:]):
        if point < upper:
            return index
    return len(points)
|
def _f_flags_to_set(f_flags):
"""Transform an int f_flags parameter into a set of mount options.
Returns a set.
"""
# see /usr/include/sys/mount.h for the bitmask constants.
flags = set()
if f_flags & 0x1:
flags.add('read-only')
if f_flags & 0x1000:
flags.add('local')
if f_flags & 0x4000:
flags.add('rootfs')
if f_flags & 0x4000000:
flags.add('automounted')
return flags
|
def ordinal_str(n):
    """Convert a number to an ordinal string ('1st', '2nd', '11th', ...)."""
    # Teens (10-19 in the last two digits) always take 'th'.
    if 10 <= n % 100 < 20:
        suffix = "th"
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, "th")
    return str(n) + suffix
|
def getMarriageRecordId(record):
    """Return a fairly unique identifier for a marriage record.

    May not be absolutely unique: it concatenates year, record number, and the
    two spouses' names with underscores.
    """
    return (
        f"{record['year']}_{record['record_number']}_"
        f"{record['husband_first_name']}_{record['husband_last_name']}_"
        f"{record['wife_first_name']}_{record['wife_last_name']}"
    )
|
def rpexp(a, b):
    """Russian peasant exponentiation.

    Exponentiation based on the Russian peasant multiplication algorithm,
    taken from "Ten Little Algorithms" by Jason Sachs.

    Args:
        a (int): the base.
        b (int): the exponent.

    Returns:
        int: a raised to the power b.
    """
    base, exponent = a, b
    acc = 1
    while exponent != 0:
        # Multiply in the current square whenever the low bit is set.
        if exponent & 1:
            acc *= base
        exponent >>= 1
        base *= base
    return acc
|
def get_max_length(lst):
    """
    Return the maximum sublist length found in a list of lists.

    :param lst: input list of sized items (e.g. list of lists)
    :return: length of the largest sublist, or 0 when ``lst`` is empty

    Fixes: the docstring referenced a nonexistent parameter ``grammar``, and
    ``max([...])`` raised ValueError on an empty input; ``default=0`` makes the
    empty case well-defined, and the generator avoids building a throwaway list.
    """
    return max((len(item) for item in lst), default=0)
|
def isfloat(value):
    """
    Check whether ``value`` is convertible to a float.

    :param value: any object
    :return: True when ``float(value)`` succeeds, False otherwise

    Fix: ``float(None)`` and ``float([])`` raise TypeError, not ValueError, so
    the previous version crashed on non-string/non-numeric inputs instead of
    returning False; both exception types are now handled.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
|
def W_0(phi_0, m, om, N):
    """Vertical velocity amplitude. Wavenumber and frequency should be in
    angular units."""
    amplitude_factor = -om * m / (N ** 2 - om ** 2)
    return amplitude_factor * phi_0
|
def intersection(st, ave):
    """Represent an intersection using the Cantor pairing function."""
    diagonal = st + ave
    return diagonal * (diagonal + 1) // 2 + ave
|
def calc_long(historicals, i):
    """
    Fit an exponential growth rate through the series and return the
    [upper, lower] bounds (+/-5%) for the next value, rounded to 2 decimals.
    Returns None when the first data point is zero (rate undefined).
    """
    base = historicals[0]
    if base == 0:
        return None
    rate_of_change = (historicals[i] / base) ** (1 / float(i))
    return [
        round(historicals[i] * rate_of_change * 1.05, 2),
        round(historicals[i] * rate_of_change * 0.95, 2),
    ]
|
def scream(text):
    """I have no mouth and I must ... yeah (uppercase the text)."""
    shouted = text.upper()
    return shouted
|
def remove_dup(seq):
    """
    Remove duplicates from ``seq`` while preserving first-seen order.

    https://stackoverflow.com/questions/480214/
    how-do-you-remove-duplicates-from-a-list-whilst-preserving-order
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
|
def make_arguments(action: str) -> dict:
    """
    Build the default argument dictionary for a command.

    Parameters
    ----------
    action : string
        The action to execute.
    """
    arguments = dict(
        action=action,
        file=None,
        folder=None,
        device=None,
        all=False,
        move_path=None,
        from_device=None,
    )
    return arguments
|
def prime_factors(num: int) -> list:
    """
    Return the prime factorization of a number, with multiplicity, in
    ascending order.

    :param num: the number to factor
    :return: list of prime factors

    Example:
    >>> prime_factors(12)
    [2, 2, 3]
    """
    factors = []
    remaining = num
    candidate = 2
    # Trial division: only candidates up to sqrt(remaining) can be factors.
    while candidate * candidate <= remaining:
        while remaining % candidate == 0:
            factors.append(candidate)
            remaining //= candidate
        candidate += 1
    # Whatever is left above 1 is itself prime.
    if remaining > 1:
        factors.append(remaining)
    return factors
|
def eh_tabuleiro(tab):  # universal -> boolean
    """
    Check whether the argument qualifies as a board: a tuple of 3 tuples,
    each holding exactly 3 elements, where every element is 1, -1 or 0.

    Arguments:
        tab - the value to analyse.

    Returns:
        True when the argument is a valid board, False otherwise.
    """
    if not isinstance(tab, tuple) or len(tab) != 3:
        return False
    for row in tab:
        if type(row) != tuple or len(row) != 3:
            return False
        for cell in row:
            # type() (not isinstance) deliberately rejects bools,
            # which are a subclass of int.
            if type(cell) != int:
                return False
            if cell not in (1, 0, -1):
                return False
    return True
|
def status_info(status):
    """Extract (track, artist, album) from a Spotify status dict.

    Arguments:
        status (dict): Spotify status info

    Returns:
        tuple (track, artist, album) — 'unknown' for any field that cannot
        be read from the structure.
    """
    def _safe(lookup):
        # Best-effort lookup: any structural problem yields 'unknown'.
        try:
            return lookup()
        except Exception:
            return 'unknown'

    artist = _safe(lambda: status['item']['artists'][0]['name'])
    track = _safe(lambda: status['item']['name'])
    album = _safe(lambda: status['item']['album']['name'])
    return track, artist, album
|
def filter_queryset_metadata_uuid(queryset, uuid):
    """Filter a REST framework queryset down to the element holding ``uuid``.

    Args:
        queryset: a queryset containing elements
        uuid: a uuid string (None or empty leaves the queryset untouched)

    Returns:
        queryset: the queryset restricted to matching elements
    """
    if uuid is None or len(uuid) == 0:
        return queryset
    return queryset.filter(uuid=uuid)
|
def _d3(E, f):
"""
Solve Eq. 22
"""
return -f[0] / (f[1] - 0.5 * f[0] * f[2] / f[1])
|
def final_model_input(messages, names):
    """
    Build a sender -> message dict holding each sender's most recent message.

    :param messages: iterable of (sender, message) pairs in chronological order
    :param names: senders to include; each must have at least one message
    :return: dict mapping each name to its last message
    """
    grouped = []
    for sender in names:
        grouped.append([[entry[0], entry[1]] for entry in messages if entry[0] == sender])
    latest_by_sender = dict(group[-1] for group in grouped)
    return latest_by_sender
|
def list_to_dict(dict_list: list) -> dict:
    """
    Build a dictionary from a flat list ``[key, value, key_2, value_2, ...]``.

    .. code-block:: python
        >>> lst = ["A","T", "R", 3]
        >>> list_to_dict(lst)
        {"A": "T", "R": 3}

    Args:
        dict_list (list[str]): dictionary flattened as a list

    Returns:
        dict: the reconstructed dictionary
    """
    result = {}
    # Walk even indices as keys; an odd-length list raises IndexError,
    # matching the strictness of the original comprehension.
    for idx in range(0, len(dict_list), 2):
        result[dict_list[idx]] = dict_list[idx + 1]
    return result
|
def fprint(func, max_lines=100, exclude_docstring=True, show=True):
    """Function print: print (or return) the source code of ``func`` from its
    file, via ``inspect.getsourcelines``.

    Args:
        func: the function whose source is wanted.
        max_lines: maximum number of output lines (header included).
        exclude_docstring: replace the function's docstring with a short
            "see help(...)" placeholder.
        show: print the text when True; otherwise return it as a string.

    Returns:
        str when ``show`` is False, otherwise None.

    Fix: when ``func`` has no docstring, ``func.__doc__`` is None and the old
    ``str.replace(None, ...)`` raised TypeError; the replacement is now only
    attempted when a docstring exists.
    """
    import inspect
    filepath = inspect.getsourcefile(func)
    code_lines, first_line = inspect.getsourcelines(func)
    # Header lines, then the source itself, truncated to max_lines.
    to_print = ["from: '{}'\n".format(filepath), "line: {}\n\n".format(first_line)]
    to_print += code_lines
    text = "".join(to_print[:max_lines])
    if exclude_docstring and func.__doc__:
        placeholder = ' <docstring see help({})> '.format(func.__name__)
        text = text.replace(func.__doc__, placeholder)
    if show:
        print(text)
    else:
        return text
|
def unquoted(s):
    """ Given a string, return an unquoted string as per RFC 3501, section 9.

    Accepts str or bytes. Inputs shorter than two characters cannot carry a
    quote pair and are returned unchanged (the original raised IndexError
    on empty input).
    """
    if isinstance(s, str):
        if len(s) >= 2 and s[0] == '"' and s[-1] == '"':
            return s[1:-1].replace('\\"', '"').replace('\\\\', '\\')
        return s
    else:
        # Bug fixes: indexing bytes yields ints in py3, so compare 1-byte
        # slices; and the replacement for b'\\"' must be bytes, not str.
        if len(s) >= 2 and s[:1] == b'"' and s[-1:] == b'"':
            return s[1:-1].replace(b'\\"', b'"').replace(b'\\\\', b'\\')
        return s
|
def get_start_activities_threshold(salist, decreasing_factor):
    """
    Get start attributes cutting threshold
    Parameters
    ----------
    salist
        Sorted start attributes list (pairs of attribute, count)
    decreasing_factor
        Decreasing factor of the algorithm
    Returns
    ---------
    threshold
        Start attributes cutting threshold
    """
    threshold = salist[0][1]
    # Walk the sorted counts; keep lowering the threshold while each count
    # stays within decreasing_factor of the current threshold.
    for entry in salist[1:]:
        count = entry[1]
        if count > threshold * decreasing_factor:
            threshold = count
    return threshold
|
def iterative_gauss_n(sigma_small, sigma_large):
    """Compute iterations of small sigma which is equal to using big sigma."""
    ratio = sigma_large / sigma_small
    # Variances add across repeated blurs, hence the squared ratio.
    return int(ratio ** 2.)
|
def markNonoverlapp(idea, oldest_idea_id, oldest_idea_node_name, oldest_idea_target):
    """
    Mark IDEA as non-overlapping event.
    :return: marked IDEA (a falsy input is returned unchanged)
    """
    if not idea:
        return idea
    # Only mark when BOTH Node.Name and Target differ from the oldest event.
    is_distinct = (oldest_idea_node_name != idea.node_name
                   and oldest_idea_target != idea.target_ip4)
    if is_distinct:
        idea.aida_non_overlapping = oldest_idea_id
    return idea
|
def getReadablePortion(lines: list) -> list:
    """Returns the portion of lines that starts with the line with text 'START'"""
    marker = 'START\n'
    # ValueError propagates (as in the original) when the marker is absent.
    return lines[lines.index(marker):]
|
def mergeDicts(*dicts):
    """Compatibility polyfill for dict unpacking for python < 3.5
    See PEP 448 for details on how merging functions
    :return: merged dicts
    :rtype: dict
    """
    merged = {}
    # Later dicts override earlier ones, matching {**d1, **d2, ...}.
    for d in dicts:
        merged.update(d)
    return merged
|
def to_digits_base10(n):
    """
    Return the digits of a number in base 10, most significant first.

    Fixed: 0 now yields [0] instead of an empty list (zero has one digit).
    Negative inputs still yield [] as before.
    """
    if n == 0:
        return [0]
    digits = []
    remaining = n
    while remaining > 0:
        # divmod gives quotient and remainder in one step.
        remaining, digit = divmod(remaining, 10)
        digits.append(digit)
    return digits[::-1]
|
def cx_gate_counts_deterministic(shots, hex_counts=True):
    """CX-gate circuits reference counts.

    Returns one single-outcome counts dict per reference circuit, in order:
    CX01 |00>, CX10 |00>, CX01.(X^I) |10>, CX10.(I^X) |01>,
    CX01.(I^X) |11>, CX10.(X^I) |11>, CX01.(X^X) |01>, CX10.(X^X) |10>
    """
    if hex_counts:
        outcomes = ['0x0', '0x0', '0x2', '0x1', '0x3', '0x3', '0x1', '0x2']
    else:
        outcomes = ['00', '00', '10', '01', '11', '11', '01', '10']
    # All shots land on a single deterministic outcome per circuit.
    return [{outcome: shots} for outcome in outcomes]
|
def unsetbit(x, nth_bit):
    """unset n-th bit (i.e. set to 0) in an integer or array of integers
    Args:
        x: integer or :class:`numpy.ndarray` of integers
        nth_bit: position of bit to be set (0, 1, 2, ..)
    Returns:
        integer or array of integers where n-th bit is unset while all other bits are kept as in input x
    Examples:
        >>> unsetbit(7, 2)
        3
        >>> unsetbit(8, 2)
        8
    """
    if nth_bit < 0:
        raise ValueError('position of bit cannot be negative')
    # AND with the complement of the single-bit mask clears just that bit.
    return x & ~(1 << nth_bit)
|
def tag2ate(tag_sequence):
    """
    Convert a tag sequence into aspect-term spans.
    :param tag_sequence: list of tags; 'S' marks a single-token term,
        'B'/'E' mark the begin/end of a multi-token term.
    :return: list of (begin, end) index pairs, inclusive.
    """
    spans = []
    begin = -1
    for idx, tag in enumerate(tag_sequence):
        if tag == 'S':
            spans.append((idx, idx))
        elif tag == 'B':
            begin = idx
        elif tag == 'E' and idx > begin > -1:
            # Close the open span only when an unconsumed 'B' precedes it.
            spans.append((begin, idx))
            begin = -1
    return spans
|
def add_space(string, length):
    """This method is used to format Unique ID into the Eddystone format."""
    chunks = (string[start:start + length] for start in range(0, len(string), length))
    return " ".join(chunks)
|
def all_increasing(distances, past_distances):
    """Check if particles distances are all increasing."""
    # "Increasing" is non-strict here: an unchanged distance also passes.
    return all(
        distance >= past_distances[particle_number]
        for particle_number, distance in distances.items()
    )
|
def int_to_hex(number):
    """
    Check 0
    >>> int_to_hex(0)
    '0'
    Value less than 10
    >>> int_to_hex(9)
    '9'
    Value requiring letter digits
    >>> int_to_hex(15)
    'F'
    Boundary (Uses 10s place)
    >>> int_to_hex(16)
    '10'
    Boundary (Uses 10s, and 1s places)
    >>> int_to_hex(17)
    '11'
    Multiple 16ths
    >>> int_to_hex(129)
    '81'
    Boundary (Uses 100s, 10s, and 1s places)
    >>> int_to_hex(301)
    '12D'
    """
    hex_digits = '0123456789ABCDEF'
    if number == 0:
        return '0'
    digits = []
    while number > 0:
        # Bug fix: int(number / 16) goes through float division, which
        # loses precision for large integers; divmod stays exact.
        number, remainder = divmod(number, 16)
        digits.append(hex_digits[remainder])
    # Digits were produced least-significant first.
    return ''.join(reversed(digits))
|
def If(test=True, then=None, otow=None):
    """ enable Task("t1").add(If(test= (1==1),
                                 then= Variables(ONE=1),
                                 otow= Variables(TWO=2)))
    appreciate that both branches are evaluated, using this If class
    ie there is no 'dead code' as it is with python language 'if' structure
    using If to distinguish od/rd mode request that both users share
    the variables (parameter.py) and ecf.py
    otow: on the other way?
    """
    return then if test else otow
|
def train_list_filename(split_id):
    """Return the JSON filename for the training list of a given split.
    Args:
        split_id: split identifier interpolated into the filename
    Returns:
        str: filename of the form ``train_<split_id>.json``
    """
    return 'train_{}.json'.format(split_id)
|
def qw_api_error(arg1, arg2, path, headers=None, payload=None):
    """Return a canned error response, as if something went wrong with
    the API call in an unpredictable way. None of the arguments are used;
    they exist only to match the real API-call signature.
    """
    error = {
        'code': 'unpredictable_error',
        'message': 'API call not performed as expected'
    }
    return {'data': {'errors': [error]}}
|
def convert_body_to_bytes(resp):
    """
    If the request body is a string, encode it to bytes (for python3 support)
    By default yaml serializes to utf-8 encoded bytestrings.
    When this cassette is loaded by python3, it's automatically decoded
    into unicode strings. This makes sure that it stays a bytestring, since
    that's what all the internal httplib machinery is expecting.
    For more info on py3 yaml:
    http://pyyaml.org/wiki/PyYAMLDocumentation#Python3support
    """
    # Best-effort: anything that isn't the expected dict shape, or that
    # can't be encoded, is left untouched rather than raising.
    try:
        body = resp["body"]["string"]
        if body is not None and not isinstance(body, bytes):
            resp["body"]["string"] = body.encode("utf-8")
    except (KeyError, TypeError, UnicodeEncodeError):
        pass
    return resp
|
def bubble_sort(list_):
    """
    Sort the list in place by repeatedly swapping adjacent
    out-of-order pairs; also returns the same list.
    """
    n = len(list_)
    # Each pass bubbles the largest remaining element to position end-1,
    # so the scanned range shrinks by one every pass.
    for end in range(n, 1, -1):
        for idx in range(end - 1):
            if list_[idx] > list_[idx + 1]:
                list_[idx], list_[idx + 1] = list_[idx + 1], list_[idx]
    return list_
|
def wiggle_numbers(nums):
    """Wiggle numbers:
    Given a list of number, return the list with all even numbers doubled, and all odd numbers turned negative.
    >>> wiggle_numbers([72, 26, 79, 70, 20, 68, 43, -71, 71, -2])
    [144, 52, -79, 140, 40, 136, -43, 71, -71, -4]
    """
    # Even -> doubled; odd -> negated (negative odds become positive).
    return [2 * x if x % 2 == 0 else -x for x in nums]
|
def get_ssh_options(config_file, options):
    """Returns the SSH arguments for the given parameters. Used by
    commands that wrap SSH.
    :param config_file: SSH config file.
    :type config_file: str | None
    :param options: SSH options
    :type options: [str]
    :rtype: str
    """
    parts = ['-o {}'.format(opt) for opt in options]
    result = ' '.join(parts)
    if config_file:
        result += ' -F {}'.format(config_file)
    # Trailing space lets callers concatenate the result directly.
    if result:
        result += ' '
    return result
|
def szrange(charge, nsingle):
    """
    Make a list giving :math:`S_{z}` values for given charge.
    Parameters
    ----------
    charge : int
        Value of the charge.
    nsingle : int
        Number of single particle states.
    Returns
    -------
    list
        List containing :math:`S_{z}` values for given charge.
    """
    # Maximum |Sz| is limited by both the particles and the holes.
    szmax = min(charge, nsingle - charge)
    return [sz for sz in range(-szmax, szmax + 1, 2)]
|
def getAllDifferentValues(clustersPerEpoch):
    """
    Get all the different values ocurring during a simulation
    :param clustersPerEpoch: List with dictionaries for all epochs. The dictionary
        has the set of different values (according to column) and their
        number
    :type clustersPerEpoch: list
    :returns: set -- Set containing all values ocurring during a simulation
    """
    allValues = set()
    for epochSummary in clustersPerEpoch:
        # Iterating a dict yields its keys, i.e. the distinct values.
        allValues.update(epochSummary)
    return allValues
|
def condition_1(arg):
    """
    CONDITION 1: Doesn't contain bu, ba or be
    :param arg: string to inspect
    :return: True when none of the forbidden substrings appear
    """
    forbidden = ('be', 'ba', 'bu')
    return not any(chunk in arg for chunk in forbidden)
|
def zero_999(numb, flag=1):
    """Returns a string that represents the numb variable in ordinal or
    cardinal notation (Portuguese).
    numb must be between 0 and 999. flag must be 0 for ordinal notation or
    greater than zero for cardinal notation.

    Bug fix: the table indices used true division (`numb / 10 - 2`), which
    yields floats in Python 3 and raised TypeError for any numb >= 10;
    floor division (`//`) is used instead. The local variable shadowing
    the builtin `str` was also renamed.
    """
    # Constants
    ZERO_NINETEEN = ["zero", "um", "dois", "tres", "quatro", "cinco", "seis",
                     "sete", "oito", "nove", "dez", "onze", "doze", "treze",
                     "catorze", "quinze", "dezesseis", "dezessete",
                     "dezoito", "dezenove"]
    TWENTY_NINETY_NINE = ["vinte", "trinta", "quarenta", "cinquenta",
                          "sessenta", "setenta", "oitenta", "noventa"]
    TWO_NINE_HUNDRED = ["duzentos", "trezentos", "quatrocentos", "quinhentos",
                        "seiscentos", "setecentos", "oitocentos", "novecentos"]
    ZERO_NINETH = ["zero", "primeiro", "segundo", "terceiro", "quarto",
                   "quinto", "sexto", "setimo", "oitavo", "nono"]
    TENTH_NINETYTH = ["decimo", "vigesimo", "trigesimo", "quadragesimo",
                      "quinquagesimo", "sexagesimo", "septuagesimo",
                      "octogesimo", "nonagesimo"]
    ONE_NINE_HUNDREDTH = ["centesimo", "ducentesimo", "trecentesimo",
                          "quadringentesimo", "quingentesimo", "sexcentesimo",
                          "septingentesimo", "octingentesimo",
                          "noningentesimo"]
    result = ""  # String that'll be returned.
    if flag:  # Cardinal
        if not numb:  # Catch 0
            result = ''.join([result, ZERO_NINETEEN[numb]])
        else:  # Catch 1..999
            while numb:
                if 1 <= numb <= 19:
                    result = ''.join([result, ZERO_NINETEEN[numb]])
                    break
                elif 20 <= numb <= 99:
                    result = ''.join([result, TWENTY_NINETY_NINE[numb // 10 - 2]])
                    numb %= 10
                    if numb:
                        result = ''.join([result, " e "])
                elif numb == 100:
                    result = ''.join([result, "cem"])
                    numb %= 100
                elif 101 <= numb <= 199:
                    result = ''.join([result, "cento e "])
                    numb %= 100
                elif 200 <= numb <= 999:
                    result = ''.join([result, TWO_NINE_HUNDRED[numb // 100 - 2]])
                    numb %= 100
                    if numb:
                        result = ''.join([result, " e "])
    else:  # Ordinal
        if not numb:  # Catch 0
            result = ''.join([result, ZERO_NINETH[numb]])
        else:  # Catch 1..999
            while numb:
                if 1 <= numb <= 9:
                    result = ''.join([result, ZERO_NINETH[numb]])
                    break
                elif 10 <= numb <= 99:
                    result = ''.join([result, TENTH_NINETYTH[numb // 10 - 1]])
                    numb %= 10
                    if numb:
                        result = ''.join([result, ' '])
                elif 100 <= numb <= 999:
                    result = ''.join([result, ONE_NINE_HUNDREDTH[numb // 100 - 1]])
                    numb %= 100
                    if numb:
                        result = ''.join([result, ' '])
    return result
|
def annot_type(ann):
    """
    Returns what type of annotation `ann` is, as a sorted tuple of the
    recognized kind names present in it.
    """
    recognized = {'bbox', 'line', 'keypoints'}
    present = recognized.intersection(ann)
    return tuple(sorted(present))
|
def format_syntax_error(e: SyntaxError) -> str:
    """ Formats a SyntaxError as a Markdown python code block. """
    name = type(e).__name__
    if e.text is None:
        return f'```py\n{name}: {e}\n```'
    # display a nice arrow: right-align '^' under the offending column
    caret = f"{'^':>{e.offset}}"
    return f'```py\n{e.text}{caret}\n{name}: {e}```'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.