content stringlengths 42 6.51k |
|---|
def _escape(txt):
"""Basic html escaping."""
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
return txt |
def VersionSplit(ThisVersion):
    """
    Split a dotted version string (e.g. '8.59.10' or '8.59.10-RC2') into a
    list of ints suitable for numeric sorting. An '-RC' suffix on the third
    component contributes a fourth element.
    """
    pieces = ThisVersion.split(".")
    parts = [int(pieces[0]), int(pieces[1])]
    release = pieces[2].split('-RC')
    parts.append(int(release[0]))
    if len(release) == 2:
        parts.append(int(release[1]))
    return parts
def parse_volume_string(volume_string):
    """Parse a volume specification string SOURCE:TARGET:MODE into
    its components.

    Raises ValueError (not click.BadOptionUsage, despite this module's
    apparent click usage elsewhere) when the string does not have exactly
    three colon-separated fields or when MODE is not 'rw' or 'ro'.
    """
    try:
        source, target, mode = volume_string.split(":")
    except ValueError:
        raise ValueError("Volume string must be in the form "
                         "source:target:mode")
    if mode not in ('rw', 'ro'):
        raise ValueError("Volume mode must be either 'ro' or 'rw'")
    return source, target, mode
def _CalcEta(time_taken, written, total_size):
"""Calculates the ETA based on size written and total size.
@param time_taken: The time taken so far
@param written: amount written so far
@param total_size: The total size of data to be written
@return: The remaining time in seconds
"""
avg_time = time_taken / float(written)
return (total_size - written) * avg_time |
def prob_above_name(diagnostic: str) -> str:
    """Construct the name of an above-threshold probability cube."""
    return "probability_of_" + diagnostic + "_above_threshold"
def ensure_bytes(x):
    """Return *x* as :class:`bytes`.

    Already-bytes input is returned unchanged; :class:`str` input is
    encoded as UTF-8 (the encoding the Objective-C runtime API expects for
    all its ``char *`` strings).
    """
    if not isinstance(x, bytes):
        x = x.encode('utf-8')
    return x
def p2a(p, m1, m2):
    """
    Compute the orbital separation (Rsun) from the period p (days) and the
    component masses m1, m2 (Msun) via Kepler's third law.
    """
    yeardy = 365.24
    AURsun = 214.95
    p = p / yeardy
    return AURsun * (p * p * (m1 + m2)) ** (1. / 3.)
def map_values(function, dictionary):
    """Apply `function` to every value of `dictionary` and return
    a dictionary of the results.

    >>> d = {'1': 1, '2': 2}
    >>> sorted(map_values(lambda value: 2 * value, d).items())
    [('1', 2), ('2', 4)]
    """
    # Dict comprehension instead of dict() over a list comprehension.
    return {key: function(value) for key, value in dictionary.items()}
def parseMovie(line):
    """
    Parse a MovieLens movie record "movieId::movieTitle" into
    (movie_id, title); any extra ::-separated fields are ignored.
    """
    fields = line.strip().split("::")
    movie_id, title = fields[0], fields[1]
    return int(movie_id), title
def _log_rescale(baseline, mode='mean'):
"""Log the rescaling method."""
if baseline is not None:
valid_modes = ('logratio', 'ratio', 'zscore', 'mean', 'percent',
'zlogratio')
if mode not in valid_modes:
raise Exception('mode should be any of : %s' % (valid_modes, ))
msg = 'Applying baseline correction (mode: %s)' % mode
else:
msg = 'No baseline correction applied'
return msg |
def placement_strategy_validator(x):
    """
    Property: PlacementStrategy.Type
    """
    valid_values = ["random", "spread", "binpack"]
    if x in valid_values:
        return x
    raise ValueError(
        "Placement Strategy type must be one of: %s" % ", ".join(valid_values)
    )
def scale_range_1d(low, high, amount):
    """Multiply the (low, high) range by `amount`, keeping it centered."""
    span = high - low
    mid = low + span / 2
    scaled = amount * span
    return mid - scaled / 2, mid + scaled / 2
def select_color_marker(i):
    """Return an index-based color+linestyle format string for plotting."""
    colors = ['b', 'g', 'r', 'c', 'y', 'k']
    styles = ['-', '--', '-.', ':']
    color = colors[i % len(colors)]
    linestyle = styles[(i // len(colors)) % len(styles)]
    return '%s%s' % (color, linestyle)
def get_unsigned_short(data, index):
    """Return two little-endian bytes from data as an unsigned 16-bit value.

    Args:
        data (list): raw data from sensor
        index (int): index entry from which to read data
    Returns:
        int: extracted unsigned 16-bit value
    """
    low = data[index]
    high = data[index + 1]
    return (high << 8) + low
def circularRotation(arr, direction=0, n=1):
    """
    Circularly rotate the bits of *arr* left (direction=0) or right
    (direction=1) by *n* bit positions.

    :param arr: bytes-like input
    :param direction: 0 = left shift, anything else = right shift
    :param n: rotation amount; reduced modulo the bit length, which also
        fixes the negative-shift ValueError the old code raised for
        n greater than the bit length
    :return: rotated value as bytes of the same length as *arr*
    """
    nB = len(arr) * 8
    n %= nB  # rotation is periodic in the bit length
    arrInt = int.from_bytes(arr, "big")
    # nB ones; trims any bits shifted past the word width (replaces the
    # old hex-string "FF" join construction).
    mask = (1 << nB) - 1
    if direction == 0:
        r = ((arrInt << n) | (arrInt >> (nB - n))) & mask
    else:
        r = ((arrInt >> n) | (arrInt << (nB - n))) & mask
    return r.to_bytes(len(arr), "big")
def message_to_pretty_string(incoming_message):
    """
    Render a message dict as a single pipe-delimited line, falling back to
    a shorter format when the error-detail keys are absent.

    Args:
        incoming_message: dict
    Returns:
        str
    """
    long_keys = ('result', 'action', 'target', 'error_code', 'error_message',
                 'linenum', 'data', 'timestamp')
    try:
        values = [incoming_message[key] for key in long_keys]
    except KeyError:
        return "{0} | {1} | {2} | data: {3} | {4}".format(
            incoming_message['result'],
            incoming_message['action'],
            incoming_message['target'],
            incoming_message['data'],
            incoming_message['timestamp'])
    return "{0} | {1} | {2} | [errno:{3}] | {4} | {5} | data: {6} | {7}".format(*values)
def to_json(wad_data):
    """Serialize the statistics to a pretty-printed JSON string."""
    from json import dumps
    return dumps(wad_data, indent=2)
def get_int_values(layoutmessage):
    """Extract the integer values from a layout message string.

    From a string of the form "infoname= { ...int values... }" returns only
    the int values in a list of integers.

    Parameters
    ----------
    layoutmessage: str
        A str describing the hex or num layout
        (e.g. "hexLayout= { 50 6 ... 6}")

    Returns
    -------
    layout: list of int
        The integers found between the braces.
    """
    # Keep only the text between the first "{" and the first "}".
    numstr = layoutmessage[layoutmessage.find("{") + 1:layoutmessage.find("}")]
    # Whitespace-split and convert each token to int.
    return list(map(int, numstr.split()))
def byteToHex(data):
    """
    byteToHex - convert a byte sequence into a comma-separated hex
    representation.

    Args:
        data: the byte array to be converted
    Returns:
        Comma-separated hex string such as "0x01,0xFF". An empty input
        yields "" (the previous version raised IndexError on empty data).
    """
    return ','.join('0x{:02X}'.format(ch) for ch in data)
def ExpMag(x, params):
    """Surface brightness of an exponential profile at radius x.

    params[0] = mu_0 (central surface brightness)
    params[1] = h (scale length)
    """
    mu_0, h = params[0], params[1]
    return mu_0 + 1.085736 * (x / h)
def normalize(weight_list, minval=None, maxval=None):
    """Normalize the values in a list between 0 and 1.0.

    The bounds default to the minimum and maximum of weight_list unless
    minval/maxval are given. Returns () for empty input, a list of zeros
    when all values are equal, and otherwise a generator of normalized
    floats (return types preserved from the original implementation).
    """
    if not weight_list:
        return ()
    maxval = float(max(weight_list) if maxval is None else maxval)
    minval = float(min(weight_list) if minval is None else minval)
    if minval == maxval:
        return [0] * len(weight_list)
    span = maxval - minval
    return ((w - minval) / span for w in weight_list)
def _generate_scheme_shapes(transition_scheme, dict_of_schemes):
"""
returns a dict of the same structure as schemes, but with each value being the (scalar) 1D length of the scheme element
"""
scheme_shapes = {}
for _k, scheme in dict_of_schemes.items():
scheme_shapes[_k] = scheme.get_output_sizes(transition_scheme)
return scheme_shapes |
def flatten_dict(d):
    """Flatten out a nested dictionary (later duplicate keys win)."""
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            flat.update(flatten_dict(value))
        else:
            flat[key] = value
    return flat
def _unsur(s: str) -> str:
    """Merge surrogates.

    Round-trips *s* through UTF-16 with the "surrogatepass" handler so that
    lone surrogate pairs are merged into the characters they encode.
    """
    return s.encode("utf-16", "surrogatepass").decode("utf-16", "surrogatepass")
def convert_arg_line_to_args(arg_line):
    """argparse helper for splitting input from config.

    Lines whose first non-blank character is '#' are comments and yield no
    arguments; other lines may contain both the argument and its value.
    """
    if arg_line.strip().startswith('#'):
        return []
    return arg_line.split()
def index_of(seq, value, from_index=0):
    """
    Description
    ----------
    Return the index of a value in a sequence.\n
    Returns -1 if the value was not found. A negative from_index scans
    BACKWARDS from that position towards the head of the sequence, still
    returning a non-negative index on success.
    Parameters
    ----------
    seq : (list or tuple or string) - sequence to iterate\n
    value: any - value to search for\n
    from_index : int, optional - start index (default is 0)
    Returns
    ----------
    int - index of value or -1 if the value was not found
    Example
    ----------
    >>> lst = [1, 2, 3, 4, 5]
    >>> index_of(lst, 3)
    -> 2
    >>> index_of(lst, 3, 3)
    -> -1
    """
    if not isinstance(seq, (list, tuple, str)):
        raise TypeError("param 'seq' must be a list, tuple, or string")
    if from_index < 0:
        # Backward scan: walk from_index down to -len(seq); convert the
        # matching negative index back to its non-negative equivalent.
        length = len(seq) * -1
        while from_index >= length:
            if seq[from_index] == value:
                return (length * -1) + from_index
            from_index = from_index - 1
        return -1
    # Forward scan from a non-negative start index.
    while from_index < len(seq):
        if seq[from_index] == value:
            return from_index
        from_index = from_index + 1
    return -1
def sorted_nicely(l):
    """
    Sort the given iterable of strings in natural (human-expected) order,
    comparing embedded digit runs numerically.
    :param l: a list
    :return: the sorted list
    """
    import re

    def chunk(text):
        return int(text) if text.isdigit() else text

    def natural_key(key):
        return [chunk(piece) for piece in re.split('([0-9]+)', key)]

    return sorted(l, key=natural_key)
def show_minutes(value, show_units):
    """Return nicely formatted minutes (value is given in seconds)."""
    if value is None:
        return "unknown"
    minutes = round(float(value) / 60.0, 2)
    return f"{minutes} min" if show_units else minutes
def splitpop(string, delimeter):
    """
    Split a string at the last occurrence of a delimiter and return the
    string without the last field, plus the last field.

    >>> splitpop('hello.world.test', '.')
    ('hello.world', 'test')
    """
    head, _sep, tail = string.rpartition(delimeter)
    return head, tail
def check_depends(depends, value):
    """Check if the config identified in value is a simple dependency
    listed in the depends expression.

    A simple expression consists of just && and || boolean operators; any
    other operator makes this return False. Under 'and' both operands are
    required, so value appearing in either makes it a dependency; under
    'or' it must appear in both branches. Used by the menu parser to
    indent dependent configs.
    """
    if depends is None:
        return False
    assert type(depends) == tuple
    assert len(depends) in [2, 3]
    op = depends[0]
    if op == 'identifier':
        return depends[1] == value
    if op == 'and':
        return (check_depends(depends[1], value) or
                check_depends(depends[2], value))
    if op == 'or':
        return (check_depends(depends[1], value) and
                check_depends(depends[2], value))
    return False
def extract_gist_id(gist_string):
    """Extract the gist ID from a url.

    Will also work if simply passed an ID, and now tolerates a trailing
    slash (which previously produced an empty string).

    Args:
        gist_string (str): Gist URL 'https://gist.github.com/{user}/{id}'
            or a bare gist ID.
    Returns:
        string: The gist ID.
    """
    return gist_string.rstrip("/").split("/")[-1]
def object_merge(old, new, merge_lists=True):
    """
    Recursively merge two data structures, mutating and returning *new*.

    Lists: when merge_lists is True, items of *old* are prepended to *new*.
    Dicts: keys present only in *old* are copied over; shared keys are
    merged recursively.

    Thanks rsnyman :)
    https://github.com/rochacbruno/dynaconf/commit/458ffa6012f1de62fc4f68077f382ab420b43cfc#diff-c1b434836019ae32dc57d00dd1ae2eb9R15
    """
    if isinstance(old, list) and isinstance(new, list) and merge_lists:
        for item in old[::-1]:
            new.insert(0, item)
    if isinstance(old, dict) and isinstance(new, dict):
        for key, value in old.items():
            if key not in new:
                new[key] = value
            else:
                # Bug fix: propagate merge_lists instead of silently
                # resetting it to the default True for nested structures.
                object_merge(value, new[key], merge_lists)
    return new
def keyword_list(value):
    """Ensure keywords are treated as lists (csv strings are split)."""
    return value if isinstance(value, list) else value.split(',')
def obj_fkt(mean, std):
    """
    Estimate an objective function value from a gaussian distribution.

    Parameters
    ----------
    mean : float
        Mean value of distribution
    std : float
        Standard deviation of distribution

    Returns
    -------
    obj_val : float
        Objective function output value (larger is better)
    """
    exponent = 10 / 25
    return mean / std ** exponent
def is_ok(text):
    """Tells if the given text ends with "OK", swallowing trailing blanks."""
    # rstrip does in one C-level call what the original per-character
    # while loop did.
    return text.rstrip("\r\n\t ").endswith("OK")
def similarity_threshold(h, bands):
    """
    Jaccard similarity threshold for a minhash signature of h integers with
    a signature matrix divided into `bands` bands.

    Args:
        h (int): Number of integers in the minhash signature.
        bands (int): Number of bands dividing the signature matrix.
    Returns:
        float: The Jaccard similarity threshold.
    """
    rows_per_band = h / bands
    return (1.0 / bands) ** (1 / rows_per_band)
def format_json_data(json_data):
    """Format API results for CSV.

    Ensures every row carries all expected benchmark/policy keys (missing
    ones become "") so the rows can be reported uniformly in CSV format.
    Rows are filled in place and collected into the returned list.
    """
    expected = ("cloud_service", "cloud_asset_type_id", "cloud_asset_type",
                "nist_benchmark", "cis_benchmark", "fql_policy",
                "policy_settings", "pci_benchmark", "soc2_benchmark",
                "cloud_service_subtype")
    rows = []
    for row in json_data:
        for key in expected:
            row.setdefault(key, "")
        rows.append(row)
    return rows
def format_single_row(input_data, output_header, header_map=None):
    """Build one output row from *input_data* in *output_header* order.

    Args:
        input_data: mapping of raw data values.
        output_header: the header fields to emit, in order.
        header_map: optional mapping from output field name to the key used
            in input_data when the names differ.

    Fields that cannot be resolved are skipped, so the returned row may be
    shorter than output_header.

    Fixes: without a header_map the old else-branch indexed the empty
    default dict, so every field raised KeyError and the row was always
    empty; it also used a mutable default argument and a debug print.
    """
    if header_map is None:
        header_map = {}
    row = []
    for field in output_header:
        try:
            key = header_map[field] if header_map else field
            row.append(input_data[key])
        except KeyError:
            # Unresolvable field: skip it rather than abort the whole row.
            continue
    return row
def group_by(list_, key, pop_key=False):
    """Group the items of *list_* by a (possibly dotted) key path.

    Args:
        list_: list of (nested) dicts.
        key: dotted path such as "user.id", resolved with dict.get at each
            level.
        pop_key: unused; kept for backward compatibility.
    Returns:
        dict mapping each distinct resolved value to a list of its items.
    """
    grouped = {}
    key_path = key.split(".")
    for item in list_:
        value = item
        # `part`, not `key`: the original shadowed the `key` parameter here.
        for part in key_path:
            value = value.get(part)
        grouped.setdefault(value, []).append(item)
    return grouped
def example(a: int, b: int) -> int:
    """
    Return the sum of a and b, unless one or both are 0, then return 42.

    :param a: The first operand
    :type a: int
    :param b: The second operand
    :type b: int
    :returns: The conditional sum
    :rtype: int

    >>> example(1, 2)
    3
    >>> example(0, 4)
    42
    """
    if a == 0 or b == 0:
        return 42
    return a + b
def defuzz_coa(y, output_shape):
    """Defuzzification using the center of area.

    output_shape is the (left, center, right) triple of the output
    membership function; y is the rule weight used to scale the output.
    Returns (scaled_function, area).
    """
    left, center, right = output_shape[0], output_shape[1], output_shape[2]
    area = .5 * (right - left)
    # Scale the output membership function by the rule weight.
    return center * y * area, area
def ascii64_paths(tnum):
    """
    Return components of the path to the data for trigger `tnum`, both at
    the COSSC FTP site and locally: (group, rfname, remote).

    group    name of the group directory containing the trigger directory
    rfname   name of the remote ASCII data file, "cat64ms.<tnum>"
    remote   tail of the ftp path @ COSSC, "<group>/<rfname>"
    """
    # ASCII data is grouped in trigger ranges of 1000.
    group = 'trig%05d' % ((tnum // 1000) * 1000)
    rfname = 'cat64ms.%05d' % tnum
    # Plain concatenation: this is an FTP path, not a local filesystem path.
    return group, rfname, group + '/' + rfname
def get_cifar10_labels(labels):
    """Get text labels for cifar10.

    Fixes a missing comma that silently concatenated 'bird' and 'cat' into
    the single label 'birdcat', leaving only 9 entries and shifting every
    later class by one.
    """
    text_labels = ['airplane', 'car', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
    return [text_labels[int(i)] for i in labels]
def decode_sflow_data_source(sflow_data_source):
    """Decode a sflow_data_source as described in the sFlow v5 spec.

    The most significant byte is the source type; the remaining 24 bits
    are the source index value.
    """
    # source type should be one of
    #   0 = ifIndex
    #   1 = smonVlanDataSource
    #   2 = entPhysicalEntry
    source_type = sflow_data_source >> 24
    # Lower 24 bits are the index; the previous 0xfff mask kept only 12.
    value = sflow_data_source & 0xffffff
    return (source_type, value)
def config_retry_strategy(retry):
    """Generate retry strategy for the given integer retry limit."""
    if isinstance(retry, int):
        return {"limit": retry, "retryPolicy": "Always"}
    raise ValueError("Parameter retry should be a number")
def hailstone(n):
    """Print out the hailstone (Collatz) sequence starting at n, and return
    the number of elements in the sequence.

    >>> a = hailstone(10)
    10
    5
    16
    8
    4
    2
    1
    >>> a
    7
    """
    # Iterative form; also drops the leftover "*** YOUR CODE HERE ***"
    # scaffold string from the original exercise template.
    count = 0
    while True:
        print(n)
        count += 1
        if n == 1:
            return count
        n = 3 * n + 1 if n % 2 == 1 else n // 2
def filter_url_extension(url: str) -> bool:
    """Keep URLs that are extension-less routes or end in a known web
    extension (php, html, js, css, txt)."""
    last_segment = url.split("/")[-1].lower()
    extension = last_segment.split(".")[-1].lower()
    if extension == last_segment:
        # No '.' in the last path segment: a route without any extension.
        return True
    return extension in ["php", "html", "js", "css", "txt"]
def make_pulse_input(magnitude, start_time, end_time, t_vector):
    """Generate a square pulse, active strictly between start_time and
    end_time, sampled at the times in t_vector."""
    pulse = []
    for t in t_vector:
        pulse.append(magnitude * (start_time < t < end_time))
    return pulse
def parse_slack_activation(message):
    """
    Extract the abbreviation from a slack activation message.

    text message format:
        @batman define g2g
        @batman what is FBI

    Returns the upper-cased abbreviation, or None when neither trigger
    phrase is present.
    """
    words = message.split()
    try:
        return words[words.index('define') + 1].upper()
    except (ValueError, IndexError):
        pass
    # 'what is' spans two tokens, so search for 'is'; the original looked
    # for the two-word string in the split list, which can never match.
    try:
        return words[words.index('is') + 1].upper()
    except (ValueError, IndexError):
        return None
def scale_min_max(x, xmin, xmax, ymin, ymax):
    """
    Linearly scale x from the input range into the integer output range.

    :param x: the input value to transform
    :param xmin: the minimum input range
    :param xmax: the maximum input range
    :param ymin: the minimum output range
    :param ymax: the maximum output range
    :return: the scaled output value, truncated to int
    """
    fraction = (x - xmin) / (xmax - xmin)
    return int(fraction * (ymax - ymin) + ymin)
def is_ltc(dictx):
    """
    Return True if any key of *dictx* contains 'ltc' (i.e. the application
    is LTC), else False.
    """
    # any() over a generator instead of the manual loop-and-return.
    return any('ltc' in key for key in dictx)
def cross_under(fast_prices, slow_prices, shift=1):
    """True when the fast series crossed under the slow one at the bar
    `shift` positions from the end (fast was above on the previous bar and
    below on the shifted bar)."""
    last = len(slow_prices) - 1 - shift
    prev = last - 1
    return (fast_prices[last] < slow_prices[last]
            and fast_prices[prev] > slow_prices[prev])
def pdf_escape(value):
    """Escape parentheses and backslashes in ``value``.

    ``value`` must be unicode, or latin1 bytestring.
    """
    if isinstance(value, bytes):
        value = value.decode('latin1')
    # 40, 41 and 92 are the code points of '(', ')' and '\'.
    table = {ord('('): r'\(', ord(')'): r'\)', ord('\\'): r'\\'}
    return value.translate(table)
def validate_user_name(user_name):
    """Validate that the user's name contains no digits.

    Returns False (instead of raising) for input that is not an iterable
    of characters; the bare ``except:`` is narrowed to the two exception
    types such input can actually raise.
    """
    try:
        return not any(letter.isdigit() for letter in user_name)
    except (TypeError, AttributeError):
        return False
def check_desired_parameters(desired_port: dict, port_results: dict) -> bool:
    """
    Check if port meets given desired parameters and if then yes return True or else return False.
    :param desired_port: Desired parameters that port should meet
    :param port_results: Parameters of specific port found by scan
    :return: Bool value representing whether desired port parameters match any found port in scan result
    """
    for key, value in desired_port.items():
        if key in port_results.keys():
            if key == 'service':
                # 'service' holds a nested dict: every desired service
                # field also present in the scan result must match
                # case-insensitively; fields absent from the result are
                # silently ignored.
                for service_key, service_value in desired_port[key].items():
                    if service_key in port_results[key].keys():
                        if service_value.lower() != port_results[key][service_key].lower():
                            return False
            else:
                if key == 'cpe':
                    # 'cpe' in the scan result is a list of {'cpe': ...}
                    # dicts; the desired value must be one of them exactly.
                    cpes = []
                    for cpe in port_results['cpe']:
                        cpes.append(cpe['cpe'])
                    if value not in cpes:
                        return False
                else:
                    # Plain scalar field: case-insensitive string compare.
                    if value.lower() != port_results[key].lower():
                        return False
    # NOTE(review): desired keys missing from port_results are treated as
    # matching — confirm this permissive behavior is intended.
    return True
def compute_extended_start_end(start_msec, end_msec, max_end_msec,
                               fraction_extra):
    """Extend a clip symmetrically by fraction_extra of its duration.

    The returned (new_start_msec, new_end_msec) window is fraction_extra
    longer than [start_msec, end_msec] — half added on each side — clamped
    to [0, max_end_msec].

    Example: end_msec - start_msec == 100 and fraction_extra == 0.5 gives
    a 150 ms window (bounds permitting).
    """
    half_extra = (end_msec - start_msec) * fraction_extra / 2
    new_start_msec = max(0, start_msec - half_extra)          # clamp at 0
    new_end_msec = min(end_msec + half_extra, max_end_msec)   # clamp at end
    return new_start_msec, new_end_msec
def get_colors(num):
    """Return *num* copies of the sample color.

    Args:
        num(int): number of colors to return
    Returns:
        list: list of sample colors
    """
    # List repetition instead of a comprehension with an unused index.
    return ["#8FAADC"] * num
def _split_field(field):
"""
SIMPLE SPLIT, NO CHECKS
"""
return [k.replace("\a", ".") for k in field.replace("\\.", "\a").split(".")] |
def class_to_str(cls) -> str:
    """Get full qualified name for a given class."""
    # pydantic wraps dataclasses; walk up the MRO to the original class.
    while cls.__module__.startswith("pydantic.dataclass"):
        cls = cls.__mro__[1]
    return f"{cls.__module__}.{cls.__qualname__}"
def to_bytes(str):
    """Convert a text string to a UTF-8 encoded byte string.

    NOTE(review): the parameter shadows the builtin ``str``; renaming it
    would change the keyword-argument interface, so it is left as-is.
    """
    return str.encode('utf-8')
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout.

    Matches an empty/None pattern or the conventional dash (note: the
    comparison is against the *bytes* literal b'-').
    """
    return not pat or pat == b'-'
def consify(ctx, lst):
    """Given a list, construct right-nested pairs starting from the end,
    using the pair constructor stored under ctx['c(']."""
    cons = ctx['c(']
    result = lst[-1]
    for element in reversed(lst[:-1]):
        result = cons(element, result)
    return result
def get_neighbor_satellite(
        sat1_orb,
        sat1_rel_id,
        sat2_orb,
        sat2_rel_id,
        sat_positions,
        num_orbits,
        num_sats_per_orbit):
    """
    Get satellite id of neighboring satellite.

    :param sat1_orb: Orbit id of satellite
    :param sat1_rel_id: Relative index of satellite within orbit
    :param sat2_orb: Relative orbit of neighbor
    :param sat2_rel_id: Relative index of neighbor
    :param sat_positions: List of satellite objects
    :param num_orbits: Number of orbits
    :param num_sats_per_orbit: Number of satellites per orbit
    :return: satellite id of neighboring satellite, or -1 if not found
    """
    neighbor_orb = (sat1_orb + sat2_orb) % num_orbits
    neighbor_pos = (sat1_rel_id + sat2_rel_id) % num_sats_per_orbit
    for sat_id, sat in enumerate(sat_positions):
        if sat["orb_id"] == neighbor_orb and sat["orb_sat_id"] == neighbor_pos:
            return sat_id
    return -1
def find_median(list):
    """Return the median of an already-sorted list.

    Odd lengths return the middle element; even lengths return the mean of
    the two middle elements. (The previous version indexed len//2 + 1 for
    even lengths — one past the upper middle — e.g. returning 4 for
    [1, 2, 3, 4] instead of 2.5.)
    """
    mid = len(list) // 2
    if len(list) % 2 == 0:
        return (list[mid - 1] + list[mid]) / 2
    return list[mid]
def _cleanUpSearchResults(search_results, total):
"""
Helper function for _searchRepos(). Clean up search results.
GIVEN:
search_results (list) -- list of search results
total (int) -- total number of search results to return
RETURN:
results (list) -- cleaned up search results
"""
results = list()
for res in search_results:
element = [
res["node"]["url"],
res["node"]["stargazerCount"]
]
if res["node"]["primaryLanguage"] is None:
element.append("None")
else:
element.append(res["node"]["primaryLanguage"]["name"])
results.append(element)
if total == 0:
return results
else:
return results[:total] |
def prepare_messages(file, parts):
    """
    Prepare the expected output chunks, each containing the part index, the
    source file of the content, the communication channel the message
    arrived through, and the message content itself.

    :param str file: file path
    :param list parts: the parts that make up the message
    :return list: list of 4 messages carrying the content of <file>
    """
    channels = ["FIFO1", "FIFO2", "MsgQueue", "ShdMem"]
    return [
        "[Parte {}, del file {} spedita dal processo # tramite {}]\n{}\n\n".format(
            i + 1, file, channels[i], part)
        for i, part in enumerate(parts)
    ]
def rev_name(value):  # Only one argument.
    """Return *value* reversed.

    (The previous docstring incorrectly claimed lower-casing; the code has
    always reversed the string.)
    """
    return value[::-1]
def byte_to_mb(size_in_bytes: float) -> int:
    """Convert a byte count to whole megabytes.

    One megabyte is taken as 1 << 20 (= 1024 * 1024) bytes; the bit shift
    avoids spelling the multiplication out.
    """
    megabyte = float(1 << 20)
    return int(int(size_in_bytes) / megabyte)
def Intersect_list(L1, L2):
    """Efficient intersection implementation.

    Rather than converting both lists to sets and using &, the larger list
    becomes a set (for O(1) membership tests) while the smaller one is
    scanned, preserving the smaller list's order and duplicates in the
    result.
    """
    if len(L1) > len(L2):
        smaller, lookup = L2, set(L1)
    else:
        smaller, lookup = L1, set(L2)
    return [item for item in smaller if item in lookup]
def capitalize_thing(obj: str):
    """Upper-case the first letter of every whitespace-separated word,
    leaving the rest of each word unchanged."""
    words = [word[0].upper() + word[1:] for word in obj.split()]
    return ' '.join(words)
def get_longest_common_subseq(data, get_all_subseqs=False):
    """
    Adapted from http://stackoverflow.com/a/28869690
    The get_all_subseqs parameter was added.

    Note: "subsequence" here means a CONTIGUOUS slice (substring), not a
    general subsequence.

    :param data: a list of iterables
    :param get_all_subseqs: returns all the common subsequences if True
    :return:
        - the longest common subsequence
        - [] if there is no common subsequence
    """
    def is_subseq(possible_subseq, seq):
        # True when possible_subseq occurs as a contiguous slice of seq.
        if len(possible_subseq) > len(seq):
            return False
        def get_length_n_slices(n):
            # Yield every contiguous slice of seq of length n.
            for i in range(len(seq) + 1 - n):
                yield seq[i:i + n]
        for slyce in get_length_n_slices(len(possible_subseq)):
            if slyce == possible_subseq:
                return True
        return False
    def is_subseq_of_any(find, data):
        # True when `find` is a contiguous slice of every element of data.
        if len(data) < 1 and len(find) < 1:
            return False
        for i in range(len(data)):
            if not is_subseq(find, data[i]):
                return False
        return True
    substr = []
    if len(data) > 1 and len(data[0]) > 0:
        # Enumerate every slice of the first sequence and keep those common
        # to all sequences; track only the longest unless get_all_subseqs.
        for i in range(len(data[0])):
            for j in range(len(data[0])-i+1):
                potential_subseq = data[0][i:i+j]
                if is_subseq_of_any(potential_subseq, data):
                    if not get_all_subseqs and j > len(substr):
                        substr = potential_subseq
                    if get_all_subseqs:
                        substr.append(potential_subseq)
    return substr
def ChangeExtension(file_name, old_str, new_str):
    """
    Change the 'extension' at the end of a file or directory to a new type.
    For example, change 'foo.pp' to 'foo.Data'.

    This substitutes the LAST occurrence of 'old_str' in the file name with
    'new_str' (required because the name may contain 'old_str' as a legal
    part earlier than the end). If 'old_str' is not found, the original
    name is returned unchanged.

    Fixes: the previous version returned file_name[:pos] + new_str, which
    dropped any text that followed the last occurrence of 'old_str'
    instead of substituting it.
    """
    pos = file_name.rfind(old_str)
    if pos == -1:
        # old_str not found, just return the file_name unchanged.
        return file_name
    return file_name[:pos] + new_str + file_name[pos + len(old_str):]
def valid_tcp_udp_port(num: int):
    """
    Returns True if num is a valid TCP/UDP port number, else False.
    """
    # type() rather than isinstance() deliberately rejects bool (a
    # subclass of int), preserving the original semantics.
    return type(num) == int and 1 <= num <= 65535
def join_paths(elements):
    """Join path elements with ';' after coercing each element to str."""
    return ";".join(map(str, elements))
def format_dB(num):
    """
    Return a human readable dB string. The raw value is in tenths of a dB,
    so it is divided by 10 to expose the first decimal digit.
    """
    return f'{num / 10:3.1f} {"dB"}'
def to_skip_because_of_targets_parameters(target_name, lines, targets_from_cli):
    """--targets parameter is in use, we skip the targets not part of the list."""
    if not targets_from_cli:
        return False
    candidates = set(lines + [target_name])
    return not candidates & set(targets_from_cli)
def next_multiple(a, b):
    """Return the smallest number >= a that is divisible by b (b > 0).

    Computed arithmetically in O(1) instead of the previous O(b)
    increment-until-divisible loop. Assumes a positive divisor, as the
    original loop did in practice — TODO confirm callers never pass b <= 0.
    """
    return a + (-a % b)
def get_quarter(datetime_in):
    """
    Return the quarter (1-4) based on the month.

    Accepts either an object exposing a .month attribute (e.g. datetime)
    or the month number itself (1-12).
    """
    try:
        month = datetime_in.month
    except AttributeError:
        month = datetime_in
    return int((month - 1) / 3) + 1
def maccioni(b4, b5, b7):
    """
    Vegetation index of Maccioni, Agati and Mazzinghi (2001).

    .. math:: Maccioni = (b7 - b5)/(b7 - b4)

    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b5: Red-edge 1.
    :type b5: numpy.ndarray or float
    :param b7: Red-edge 3.
    :type b7: numpy.ndarray or float
    :returns Maccioni: Index value

    .. Tip::
        Maccioni, A., Agati, G., Mazzinghi, P. 2001. New vegetation
        indices for remote measurement of chlorophylls based on leaf
        directional reflectance spectra. Journal of Photochemistry and
        Photobiology B: Biology 61(1-2), 52-61.
        doi:10.1016/S1011-1344(01)00145-2.
    """
    numerator = b7 - b5
    denominator = b7 - b4
    return numerator / denominator
def toggle_modal_description(n1, n2, is_open):
    """
    Open-modal callback: toggle is_open when either dataset-description
    button on tab 1 has been clicked, otherwise leave the state unchanged.
    """
    return (not is_open) if (n1 or n2) else is_open
def zcount(list) -> float:
    """
    Return the number of elements in the given iterable.

    :param list: iterable of elements (any iterable works, like the
        original manual counter did)
    :return: int representing the number of elements
    """
    # sum() over a generator instead of a hand-rolled counter loop.
    return sum(1 for _ in list)
def cycle_co(p1, p2):
    """Implementation of ROW-WISE cycle crossover.

    Requires:
        p1 (Individual): First parent for crossover.
        p2 (Individual): Second parent for crossover.
    Ensures:
        Individuals: Two offspring, resulting from the crossover.
    """
    # Offspring placeholders - None values make it easy to debug for errors
    offspring1 = []
    offspring2 = []
    for row in p1:
        offspring1.append([None] * len(row))
        offspring2.append([None] * len(row))
    # Iterate through each row of the offspring.
    for row_idx, row in enumerate(offspring1):
        # While there are still None values in offspring, get the first index of
        # None and start a "cycle" according to the cycle crossover method
        while None in row:
            # index of the first unfilled gene in the present row
            index = row.index(None)
            # alternate parents between cycles beginning on second cycle
            if index != 0:
                p1, p2 = p2, p1
            val1 = p1[row_idx][index]
            val2 = p2[row_idx][index]
            # Chase the cycle: copy genes until it closes back on val1.
            while val1 != val2:
                offspring1[row_idx][index] = p1[row_idx][index]
                offspring2[row_idx][index] = p2[row_idx][index]
                val2 = p2[row_idx][index]
                index = p1[row_idx].index(val2)
            # In case last values share the same index, fill them in each offspring
            offspring1[row_idx][index] = p1[row_idx][index]
            offspring2[row_idx][index] = p2[row_idx][index]
    return offspring1, offspring2
def remap_bipartite_edge_list(edge_list):
    """Create an isomorphic edge list whose side labels start from 0.

    Left and right vertex sets are renumbered independently, in first-seen
    order.
    """
    left_ids = {}
    right_ids = {}
    for e in edge_list:
        # setdefault assigns the current dict size as the next fresh id.
        left_ids.setdefault(e[0], len(left_ids))
        right_ids.setdefault(e[1], len(right_ids))
    return [(left_ids[e[0]], right_ids[e[1]]) for e in edge_list]
def represents_int(s):
    """Judge whether *s* represents an int.

    Args:
        s(str): The input string to be judged.
    Returns:
        bool: Whether s represents an int or not. Non-string input that
        int() rejects (e.g. None or a list) now returns False instead of
        raising an uncaught TypeError.
    """
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
def _get_comments_request(post, sort_mode, max_depth, max_breadth):
"""
Used to build the request string used by :func:`get_comments`.
:param str post: The unique id of a Post from which Comments will be returned.
:param str sort_mode: The order that the Posts will be sorted by. Options are: "top" (ranked by upvotes minus downvotes), "best" (similar to top, except that it uses a more complicated algorithm to have good posts jump to the top and stay there, and bad comments to work their way down, see http://blog.reddit.com/2009/10/reddits-new-comment-sorting-system.html), "hot" (similar to "top", but weighted by time so that recent, popular posts are put near the top), "new" (posts will be sorted by creation time).
:returns: str
"""
return "http://www.reddit.com/r/all/comments/{}/{}.json?max_depth={}&max_breadth={}".format(post, sort_mode, max_depth, max_breadth) |
def _mock_random_weighted_choice(items, weight_attribute='weight'):  # pylint: disable=unused-argument
    """Mock random_weighted_choice.

    Always selects the first element rather than a random one for the sake
    of determinism; *weight_attribute* is accepted only for signature
    compatibility and ignored.
    """
    return items[0]
def dequote(v, dequoted=False, qchar='"'):
    """Remove quotes (qchar) from v if dequoted is False and return
    (v, dequoted) where dequoted=True if quotes were removed.
    """
    # Strip only a matched pair: both ends must be qchar and the string
    # must be longer than a single character.
    if not dequoted and len(v) > 1 and v[0] == qchar == v[-1]:
        return v[1:-1], True
    return v, dequoted
def length_checker(state_list, current_length):
    """Check the length of every adiabatic state.

    Args:
        state_list: iterable of states exposing a ``get_length()`` method.
        current_length: length reached before this matching step; a state
            is a successful match when it has grown to ``current_length + 1``.

    Returns:
        tuple: ``(successful_list, failed_list)`` — states whose length is
        ``current_length + 1``, and all remaining states, respectively.
    """
    expected = current_length + 1
    successful_list = []
    failed_list = []
    for state in state_list:
        # Call get_length() once per state; the original if/elif pair
        # evaluated it twice even though the two branches were exhaustive.
        if state.get_length() == expected:
            successful_list.append(state)
        else:
            failed_list.append(state)
    return successful_list, failed_list
def eq_or_in(val, options):
    """Return True if options contains value or if value is equal to options."""
    if isinstance(options, tuple):
        return val in options
    return val == options
def parse_webhook(webhook):
    """
    Parse out some of the important basics of any webhook.
    :param webhook: decoded webhook payload (nested dict).
    :return: tuple of (repo_url, repo_name, pusher_username, commit, before, ref)
    """
    repository = webhook["repository"]
    return (
        repository["url"],
        repository["name"],
        webhook["pusher"]["name"],
        webhook["after"],
        webhook["before"],
        webhook["ref"],
    )
def remove_comments(line):
    """Strip a trailing ``#`` comment from *line*.

    A ``#`` inside a double-quoted string does not start a comment, and a
    double quote escaped with a backslash does not toggle the quoted
    state.  Everything from the first unquoted ``#`` onward is dropped.

    NOTE(review): only double quotes are tracked — a ``#`` inside a
    single-quoted string is still treated as a comment start; confirm
    whether callers rely on that.
    """
    # The original docstring here was a block of dead commented-out code
    # (an abandoned quote-counting implementation); it has been removed.
    cleaned = []
    previous = ""
    quoted = False
    for character in line:
        if character == "\"" and previous != "\\":
            quoted = not quoted
        if not quoted and character == "#":
            break  # comment begins; discard the rest of the line
        cleaned.append(character)
        previous = character
    return "".join(cleaned)
def create_authorization_header(token_or_dict):
    """Create a Bearer Authorization header from token.
    Takes either a token_dict as returned by create_web_token or a token
    directly.
    """
    token = token_or_dict['token'] if isinstance(token_or_dict, dict) else token_or_dict
    if not isinstance(token, str):
        # PY2 jwt 2.0 (PY2 only) returns a string here
        # jwt 2.0 should be required if we drop PY2 support.
        token = token.decode('ascii')
    return ('Authorization', 'Bearer {}'.format(token))
def get_words_from_sentences(sentences):
    """ Returns a list of words from a list of sentences.
    Args:
        sentences: A list of sentences
    Returns:
        A list of words as they appear in the input.
    """
    # Flatten all whitespace-split tokens, preserving input order.
    return [word for sentence in sentences for word in sentence.split()]
def lifecycle_hooks(request, testconfig):
    """List of objects with hooks into app/svc creation and deletion
    Hooks should implement methods defined and documented in testsuite.lifecycle_hook.LifecycleHook
    or should inherit from that class"""
    hook_names = (
        testconfig.get("fixtures", {})
        .get("lifecycle_hooks", {})
        .get("defaults")
    )
    if hook_names is None:
        return []
    return [request.getfixturevalue(name) for name in hook_names]
def Time2FrameNumber(t, ori_fps, fps=10):
    """ function to convert segment annotations given in seconds to frame numbers
    input:
        ori_fps: is the original fps of the video
        fps: is the fps that we are using to extract frames from the video
        t: is the time (in seconds) that we want to convert to frame number
    output:
        numf: the frame number corresponding to the time t of a video encoded at fps
    """
    # NOTE(review): assumes ori_fps is (close to) an integer multiple of
    # fps — the ratio is truncated before dividing. Confirm with callers.
    subsample_ratio = int(ori_fps / fps)
    frames_at_original_rate = t * ori_fps
    return int(frames_at_original_rate / subsample_ratio)
def show_instruction(ticker):
    """
    Displays initial instruction based on ticker insertion
    :param ticker: the inserted ticker
    :return: dict setting visibility of instruction
    """
    visible_style = {
        'margin-top': '25%',
        'textAlign': 'center',
        'color': '#9C9C9C',
        'display': 'block',
    }
    hidden_style = {'display': 'none'}
    # Show the instruction only while no ticker has been entered.
    return visible_style if not ticker else hidden_style
def find_least_common_number(a, b, c):
    """Find least common number in the three given sorted array.
    Return -1 if there is no common number.
    Time: O(n)
    Space: O(1)
    """
    i = j = k = 0
    len_a, len_b, len_c = len(a), len(b), len(c)
    while i < len_a and j < len_b and k < len_c:
        if a[i] == b[j] == c[k]:
            return a[i]
        largest = max(a[i], b[j], c[k])
        # Advance every pointer whose value lags the current maximum;
        # at least one pointer moves each iteration, so the loop ends.
        if a[i] < largest:
            i += 1
        if b[j] < largest:
            j += 1
        if c[k] < largest:
            k += 1
    return -1
def bytes_in_block(block_size: int, i: int) -> slice:
    """
    Given the block size and the desired block index,
    return the slice of interesting bytes.
    :param block_size: The block size.
    :param i: The block index.
    :return: slice of bytes pointing to given block index.
    """
    start = block_size * i
    return slice(start, start + block_size)
def avg_score(entity):
    """ Returns the average score of a list of entities."""
    scores = entity["score"]
    return sum(scores) / len(scores)
def interpret_flags(bitmask, values):
    """
    Args:
        bitmask: flags to check
        values: array of tuples containing (value, description) pairs
    Returns: string containing descriptions of flags
    """
    # A falsy bitmask (0/None) yields None, matching the original contract.
    if not bitmask:
        return None
    matching = [desc for num, desc in values if num & bitmask]
    return ', '.join(matching)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.