content stringlengths 42 6.51k |
|---|
def calculate_mag(raw_val):
    """
    Convert a raw magnetometer reading to its scaled value.
    Calibration is currently not taken into consideration.
    """
    full_scale = (2 ** 15) - 1
    return raw_val * 2 / full_scale
def convert_c2f(c_in):
    """Return the Fahrenheit equivalent of the Celsius temperature *c_in*."""
    fahrenheit = c_in * 1.8
    fahrenheit += 32
    return fahrenheit
def convert_to_eth(wei):
    """
    Convert wei to eth.

    :param wei: float, required
    :return: float
    """
    WEI_PER_ETH = 10 ** 18
    return wei / WEI_PER_ETH
def count(s, sub, i = 0, last=None):
    """count(s, sub[, start[,end]]) -> int
    Return the number of non-overlapping occurrences of substring sub
    in string s[start:end].  Optional arguments start and end are
    interpreted as in slice notation (negative values count from the
    end of s).
    """
    Slen = len(s) # cache this value, for speed
    # Clamp `last` the way slice notation would: None -> len(s),
    # negative -> offset from the end (floored at 0), too large -> len(s).
    if last is None:
        last = Slen
    elif last < 0:
        last = max(0, last + Slen)
    elif last > Slen:
        last = Slen
    # A negative start also counts from the end of s.
    if i < 0: i = max(0, i + Slen)
    n = len(sub)
    # First index (exclusive) at which a full match can no longer start.
    m = last + 1 - n
    # Empty substring matches once at every position in the slice.
    if n == 0: return m-i
    r = 0
    while i < m:
        if sub == s[i:i+n]:
            r = r+1
            i = i+n  # jump past the match: occurrences do not overlap
        else:
            i = i+1
    return r
def with_dot(value):
    """
    Return *value* with bit 7 (0x80) cleared; all other bits unchanged.

    :param value: integer to mask
    :return: value & ~0x80
    """
    mask = ~(1 << 7)
    return value & mask
def _stringify(**parameters):
"""Converts query parameters to a query string."""
return "&".join([f"{k}={v}" for k, v in parameters.items() if v is not None]) |
def clean_response(response):
    """Strip volatile headers from *response* before writing cassettes."""
    unwanted = {"Set-Cookie", "Date", "P3P"}
    headers = response["headers"]
    if isinstance(headers, dict):
        # Normal client: headers stored as a mapping.
        for header_name in unwanted:
            headers.pop(header_name, None)
    elif isinstance(headers, list):
        # Tornado client: headers stored as a list of (name, value) pairs.
        response["headers"] = [
            (name, value) for name, value in headers if name not in unwanted
        ]
    return response
def _is_array(data):
"""Return True if object implements all necessary attributes to be used
as a numpy array.
:param object data: Array-like object (numpy array, h5py dataset...)
:return: boolean
"""
# add more required attribute if necessary
for attr in ("shape", "dtype"):
if not hasattr(data, attr):
return False
return True |
def print_selected_post(choice, orderedArray):
    """
    Print the details of the post whose postID equals *choice* and return
    its vote count.

    Args:
        choice: postID to look for.
        orderedArray: iterable of post rows shaped
            (postID, postDate, postTitle, postBody, userID, tags, votes, answers).

    Returns:
        The matching post's vote count, or 0 when no post matches.
    """
    # BUG FIX: vno was initialised to '' but compared with `== None`, so a
    # missing post returned '' instead of 0.  Use None as the sentinel.
    vno = None
    for posted in orderedArray:
        if posted[0] == choice:
            print('postID: ', posted[0])
            print('postDate: ', posted[1])
            print('postTitle: ', posted[2])
            print('postBody: ', posted[3])
            print("userID: ", posted[4])
            print("tags: ", posted[5])
            print("# of votes: ", posted[6])
            print("# of answers: ", posted[7])
            vno = posted[6]
            break
    if vno is None:
        return 0
    return vno
def grouping2sql(column, recodedict):
    """
    Build a SQL CASE expression that recodes levels of *column* according
    to *recodedict*; unmatched values map to '-1'.

    NOTE(review): values are interpolated directly into the SQL text, so
    only use with trusted column names and levels.
    """
    clauses = ["case"]
    for old_level, new_level in recodedict.items():
        clauses.append(f"when {column} = '{old_level}' then '{new_level}'")
    clauses.append("else '-1' end")
    return " ".join(clauses)
def create_token_request_payload(auth_code, redirect_uri, optional_token_request_params):
    """
    Build the payload dict for an OAuth access-token request.

    The client_id param is deliberately omitted: Verizon rejects it.
    Entries in *optional_token_request_params* override the fixed ones.
    """
    token_request_payload = {
        'grant_type': 'authorization_code',
        'code': auth_code,
        'redirect_uri': redirect_uri,
    }
    token_request_payload.update(optional_token_request_params)
    return token_request_payload
def _compute_position(input, index):
    """Compute the 1-based (line, column) position of *index* in *input*.

    Recognises LF, CR, CRLF and LFCR line endings; the second character
    of a two-character ending does not start a new line.
    """
    line = 1
    col = 1
    eol = None  # last end-of-line character seen; None once a pair is consumed
    for c in input[:index]:
        if c == '\n' or c == '\r':
            if eol is None or eol == c:
                # A fresh newline, or a repeat of the same kind: new line.
                eol = c
                line += 1
                col = 1
            else:
                # ignore second of '\n\r' and '\r\n' sequences
                eol = None
        else:
            col += 1
    return (line, col)
def str_to_bin(string):
    """Encode *string* as a flat list of 0/1 ints, 7 bits (ASCII) per char."""
    bits = []
    for ch in string:
        # 7-bit, zero-padded binary representation of the character.
        for bit in '{:07b}'.format(ord(ch)):
            bits.append(int(bit))
    return bits
def value_to_str(x) -> str:
    """Format a byte count; values >= 1024 are shown in Kb with one decimal."""
    if x >= 1024:
        return f'{x / 1024:.1f} Kb'
    return f'{x:.0f} bytes'
def split_global_comment(lines):
    """Split top comments into global and first glyph comment.

    Trailing empty lines are dropped; the last remaining empty line
    separates the global comment (before it) from the first glyph's
    comment (after it).  With no empty separator, everything is global.
    """
    # Drop trailing empty lines.
    while lines and not lines[-1]:
        lines = lines[:-1]
    try:
        # Distance from the end of `lines` to the last empty line.
        splitter = lines[::-1].index('')
    except ValueError:
        # No separator: the whole block is the global comment.
        global_comment = lines
        lines = []
    else:
        # Everything before the separator is global; the separator is dropped.
        global_comment = lines[:-splitter-1]
        lines = lines[-splitter:]
    return global_comment, lines
def cremona_letter_code(n):
    """
    Returns the Cremona letter code corresponding to an integer. For
    example, 0 - a 25 - z 26 - ba 51 - bz 52 - ca 53 - cb etc.
    .. note::
    This is just the base 26 representation of n, where a=0, b=1,
    ..., z=25. This extends the old Cremona notation (counting from
    0) for the first 26 classes, and is different for classes above
    26.
    INPUT:
    - ``n`` (int) -- a non-negative integer
    OUTPUT: str
    EXAMPLES::
    sage: from sage.databases.cremona import cremona_letter_code
    sage: cremona_letter_code(0)
    'a'
    sage: cremona_letter_code(26)
    'ba'
    sage: cremona_letter_code(27)
    'bb'
    sage: cremona_letter_code(521)
    'ub'
    sage: cremona_letter_code(53)
    'cb'
    sage: cremona_letter_code(2005)
    'czd'
    TESTS::
    sage: cremona_letter_code(QQ)
    Traceback (most recent call last):
    ...
    ValueError: Cremona letter codes are only defined for non-negative integers
    sage: cremona_letter_code(x)
    Traceback (most recent call last):
    ...
    ValueError: Cremona letter codes are only defined for non-negative integers
    sage: cremona_letter_code(-1)
    Traceback (most recent call last):
    ...
    ValueError: Cremona letter codes are only defined for non-negative integers
    sage: cremona_letter_code(3.14159)
    Traceback (most recent call last):
    ...
    ValueError: Cremona letter codes are only defined for non-negative integers
    """
    try:
        m = int(n)
        if n == m:
            n = m  # exact integer value (possibly from another numeric type)
        else:
            n = -1  # non-integral numeric (e.g. 3.14159): force the error path
    except (ValueError, TypeError):
        n = -1  # not coercible to int (e.g. symbolic input): force the error path
    if n<0:
        raise ValueError("Cremona letter codes are only defined for non-negative integers")
    if n == 0:
        return "a"
    s = ""
    # Build base-26 digits least-significant first, with chr(97) = 'a' = 0.
    while n != 0:
        s = chr(n%26+97) + s
        n //= 26
    return s
def process_phone(number):
    """Format a 12-char '+1…' US number as +1(AAA)BBB-CCCC; anything else is returned unchanged."""
    if len(number) != 12 or number[0:2] != '+1':
        return number
    area, prefix, line = number[2:5], number[5:8], number[8:12]
    return '+1(%s)%s-%s' % (area, prefix, line)
def nombreLignes(source):
    """Return the number of lines in the *source* file.

    Streams the file line by line instead of materialising the whole file
    with readlines(), so memory use stays constant.  Prints a message and
    returns 0 when the file cannot be read.
    """
    try:
        with open(source, "r", encoding="utf-8") as f:
            return sum(1 for _ in f)
    except IOError:
        print("Lecture du fichier", source, "impossible.")
        return 0
def filesafe(str_):
    """Keep only filename-safe characters (alphanumerics, space, '.', '_', '-'), then strip trailing whitespace."""
    allowed_punct = (' ', '.', '_', '-')
    kept = [ch for ch in str_ if ch.isalnum() or ch in allowed_punct]
    return "".join(kept).rstrip()
def get_submodel_name(history = 60, lag = 365, num_neighbors = 20, margin_in_days = None, metric = "cos"):
    """Return the canonical submodel name for the given model parameter setting."""
    return f'{metric}-autoknn-hist{history}-nbrs{num_neighbors}-margin{margin_in_days}-lag{lag}'
def trackOpposite(f_tr):
    """
    Return the heading opposite to *f_tr* (f_tr + 180, reduced below 360
    by repeated subtraction of 360).
    """
    opposite = f_tr + 180.
    while opposite >= 360.:
        opposite -= 360.
    return opposite
def fix_attributes(attrs):
    """
    Normalise (name, value) attribute pairs into a dict: names lowercased,
    valueless attributes mapped to True, rel/type values lowercased, and
    all values stripped.
    """
    result = {}
    for name, value in attrs:
        name = name.lower()
        if value is None:
            # Attribute present without a value.
            result[name] = True
        else:
            if name in ("rel", "type"):
                value = value.lower()
            result[name] = value.strip()
    return result
def underscore(s: str) -> str:
    """
    Replace every '-' in *s* with '_'.

    Python packaging is inconsistent: even though this package's name is
    "chris_plugin", its distribution's name may appear as "chris-plugin"
    in some situations (e.g. in `d.requires` after various pip installs),
    so names are normalised before comparison.

    :param s: string
    :return: given string with '-' replaced by '_'
    """
    return "_".join(s.split("-"))
def identical(s1, s2):
    """
    Verify that corresponding nodes in {s1} and {s2} are the same objects.
    Identity (`is`) is used deliberately so that __eq__ is never triggered.
    """
    return all(n1 is n2 for n1, n2 in zip(s1, s2))
def make_compact(creation_sequence):
    """
    Return the creation sequence in compact form: the run lengths of
    alternating 'd's and 'i's.

    Examples: [1,2,2,3] represents d,i,i,d,d,i,i,i; [3,1,2] represents
    d,d,d,i,d,d.  The first vertex used for construction is always 'd',
    so the first run length counts 'd's.

    Accepts a plain creation sequence (strings), a labeled one (tuples,
    whose labels are dropped), or an already-compact one (ints, returned
    unchanged).
    """
    head = creation_sequence[0]
    if isinstance(head, str):        # plain creation sequence
        seq = list(creation_sequence)
    elif isinstance(head, tuple):    # labeled creation sequence: strip labels
        seq = [entry[1] for entry in creation_sequence]
    elif isinstance(head, int):      # already compact
        return creation_sequence
    else:
        raise TypeError("Not a valid creation sequence type")
    runs = []
    run_len = 1
    # Count run lengths of consecutive equal symbols.
    for prev, cur in zip(seq, seq[1:]):
        if cur == prev:
            run_len += 1
        else:
            runs.append(run_len)
            run_len = 1
    runs.append(run_len)  # the final run
    return runs
def ensureList(x):
    """
    Wrap a string in a single-element list; return any other argument
    unchanged.
    """
    return [x] if isinstance(x, str) else x
def flip_bit(bit: str) -> str:
    """Return the opposite of a single "0"/"1" character."""
    assert bit in ("0", "1")
    return "1" if bit == "0" else "0"
def iseven(x):
    """
    Return True if x is even.

    Parameter x: The number to check
    Precondition: x is an int
    """
    return not x % 2
def topological_sort_using_dfs(graph: list, vertices: int) -> list:
    """
    Topologically sort a DAG given as an adjacency matrix.

    Graph must be a DAG; no validation of elements is performed.
    Time complexity: O(V+E); space complexity: O(V).
    """
    seen: set = set()
    order: list = []

    def visit(node):
        seen.add(node)
        for neighbour, connected in enumerate(graph[node]):
            if connected == 1 and neighbour not in seen:
                visit(neighbour)
        order.append(node)

    for node in range(vertices):
        if node not in seen:
            visit(node)
    order.reverse()
    return order
def check_key_types(jsonObject, key, types):
    """
    @brief True iff jsonObject is a dict containing *key* whose value is
           an instance of *types*.
    @param jsonObject The json object
    @param key The key
    @param types tuple of types (anything isinstance accepts)
    @return True/False
    """
    return (
        isinstance(jsonObject, dict)
        and key in jsonObject
        and isinstance(jsonObject[key], types)
    )
def prune_none(data):
    """Drop keys whose value is `None`, i.e. metrics which could not be computed."""
    result = {}
    for key, value in data.items():
        if value is not None:
            result[key] = value
    return result
def score_mini_table(id, scores):
    """ Build the HTML table listing the Associated PGS.

    Returns a collapsible snippet (toggle link plus hidden table) with one
    row per score (PGS ID link, name, reported trait), sorted by reported
    trait; returns an empty string when *scores* is empty.
    Note: *scores* is sorted in place as a side effect.
    """
    score_html = ''
    if scores:
        score_html += '<a class="toggle_btn" id="{}_scores"><i class="fa fa-plus-circle"></i></a>'.format(id)
        score_html += '<div class="toggle_content" id="list_{}_scores" style="display:none">'.format(id)
        score_html += """<table class="table table-striped table_pgs_score_results mt-2">
      <thead class="thead-light">
        <tr><th>PGS ID</th><th>PGS Name</th><th>Reported Trait</th></tr>
      </thead>
      <tbody>"""
        # Rows are ordered by the reported trait (ascending).
        scores.sort(key=lambda x: x.trait_reported, reverse=False)
        for score in scores:
            score_html += '<tr><td><a href="/score/{}">{}</a></td><td>{}</td><td>{}</td></tr>'.format(score.id, score.id, score.name, score.trait_reported)
        score_html += '</tbody></table></div>'
    return score_html
def mix_arrays(a, b, factor):
    """Basic linear interpolation from a to b; factor is clamped to [0.0, 1.0]."""
    clamped = min(max(factor, 0.0), 1.0)
    return a + clamped * (b - a)
def stringKeys(dictionary):
    """
    Ensure all keys of *dictionary* are strings, converting in place when
    necessary, and return the same (mutated) dictionary.

    BUG FIX: the original popped and inserted keys while iterating the
    live dict, which is undefined behaviour in Python 3 (and can raise
    RuntimeError); iterate over a snapshot of the items instead.
    """
    for key, value in list(dictionary.items()):
        if type(key) != str:
            dictionary.pop(key)
            dictionary[str(key)] = value
    return dictionary
def _ibabs_to_dict(o, fields, excludes=[]):
"""
Converts an iBabs SOAP response to a JSON serializable dict
"""
output = {}
for f in fields.keys():
if f in excludes:
continue
v = getattr(o, f)
if fields[f] is not None:
if v is not None:
output[f] = fields[f](v)
else:
output[f] = v
else:
output[f] = v
return output |
def knapsack_dp(capacity, weights, values):
    """ Find the max value a knapsack of the given capacity can hold (0/1 knapsack).

    Args:
        capacity: max weight the knapsack can carry
        weights: weight array of individual indexed items
        values: value associated with the items, in the same order as weights

    Returns:
        The maximum total value achievable within the capacity.
    """
    no_of_items = len(weights)
    # dp table: table[i][w] = best value using the first i items with capacity w
    table = [[0] * (capacity + 1) for _ in range(no_of_items + 1)]
    # Fill bottom-up from (1, 1) using the standard recurrence.
    for i in range(1, no_of_items + 1):
        for item_wt in range(1, capacity + 1):
            # Does the current item fit within the current weight budget?
            if weights[i - 1] <= item_wt:
                table[i][item_wt] = max(
                    # Case 1: item not chosen -> value up to previous item
                    table[i - 1][item_wt],
                    # Case 2: item chosen.  BUG FIX: remaining capacity is
                    # item_wt - weights[i-1], not capacity - weights[i-1].
                    table[i - 1][item_wt - weights[i - 1]] + values[i - 1],
                )
            else:
                # Item does not fit: carry the previous row's value forward.
                table[i][item_wt] = table[i - 1][item_wt]
    # The bottom-right cell holds the optimum for the whole problem.
    # (The stray debug print(table) has been removed.)
    return table[no_of_items][capacity]
def check_if_neighbors_match(src_neighbor, trg_neighbor):
    """Return the mutual matches between source and target neighbors.

    Args:
        src_neighbor (list): Source Neighbor List
        trg_neighbor (list): Target Neighbor List

    Returns:
        dict: {index: src_neighbor[index]} for every index whose
        target neighbor points straight back at it.
    """
    matching = {}
    for index, partner in enumerate(src_neighbor):
        # A match is mutual when the target's neighbor points back.
        if int(trg_neighbor[partner]) == index:
            matching[index] = partner
    return matching
def getPositions(mask):
    """
    Return the list of bit positions (LSB = position 0) that are set
    in the given mask.
    """
    # Reverse the binary string so the index equals the bit position.
    bits_lsb_first = bin(mask)[2:][::-1]
    return [pos for pos, bit in enumerate(bits_lsb_first) if int(bit)]
def contains_no_complete_reductions(reductions):
    """Return False iff some reduction ends with the empty word."""
    return all(reduction[-1] != "" for reduction in reductions)
def LSTMCellWeightsShape(num_inputs, num_nodes):
    """Return the weight-matrix shape for a single LSTM cell.

    Rows combine the input x with the previous m state; columns cover the
    cell input value plus the (in, forget, out) gates.
    """
    rows = num_inputs + num_nodes
    cols = 4 * num_nodes
    return [rows, cols]
def tokenize_grapheme_langid(enhanced_word):
    """
    Extract the language-id prefix (everything up to and including the
    first '}') of a word as a single-element list (for multilingual GBERT).
    """
    end = enhanced_word.index('}') + 1
    return [enhanced_word[:end]]
def remcheck(val, range1, range2):
    """
    Check whether *val* lies strictly between two decimals.

    When range1 < range2 the interval is the ordinary (range1, range2);
    otherwise the interval wraps around, and val only needs to be above
    range1 or below range2.

    Parameters
    ----------
    val : Float
        Value to be checked.
    range1 : Float
        Decimal 1.
    range2 : Float
        Decimal 2.

    Returns
    -------
    bool
        Result of check.
    """
    if range1 < range2:
        return range1 < val < range2
    return val > range1 or val < range2
def d_x_diffr_dy(x, y):
    """
    Derivative d(x/r)/dy with r = sqrt(x**2 + y**2); equivalent to the
    mixed second-order derivative dr_dyx.

    :param x: x-coordinate
    :param y: y-coordinate
    :return: -x*y / r**3
    """
    r_squared = x ** 2 + y ** 2
    return -x * y / r_squared ** 1.5
def array_diff(a, b):
    """Remove, in place, every value of *a* that also occurs in *b*; return *a*."""
    a[:] = [value for value in a if value not in b]
    return a
def _is_filepath(output_stream):
"""Returns True if output_stream is a file path."""
return isinstance(output_stream, str) and output_stream.startswith("file://") |
def MIN(src_column):
    """
    Builtin minimum aggregator for groupby.

    Example: Get the minimum rating of each user.
    >>> sf.groupby("user",
    ...            {'rating_min':tc.aggregate.MIN('rating')})
    """
    builtin_op = "__builtin__min__"
    return (builtin_op, [src_column])
def translate_alignment(align):
    """
    Decodes an integer into a tuple of horizontal and vertical alignment
    flag values.

    :param align: alignment integer to decode; bits 3-5 hold the vertical
        field, bits 0-2 the horizontal field
    :return: tuple (h, v); a field stays 0 when more than one of its bits
        is set
    """
    h = v = 0
    # Vertical field: bits 3-5, shifted down to 0-2.
    bits = (align & 0x38) >> 3
    # Each test `bits & FLAG == bits` passes when no bit outside FLAG is
    # set, i.e. when bits is 0 or exactly FLAG.
    if bits & 0x4 == bits:
        v = 0x1 # top
    elif bits & 0x2 == bits:
        v = 0x10 # center
    elif bits & 0x1 == bits:
        v = 0x2 # bottom
    else:
        # Multiple vertical bits set: bail out with h still 0.
        return h, v
    # Horizontal field: bits 0-2.
    bits = align & 0x7
    if bits & 0x4 == bits:
        h = 0x4 # left
    elif bits & 0x2 == bits:
        h = 0x10 # center
    elif bits & 0x1 == bits:
        h = 0x8 # right
    else:
        return h, v
    return h, v
def factorial(n):
    """
    Calculate n! iteratively.

    Args:
        n(int): non-negative integer whose factorial is computed

    Returns:
        n!  (1 for n == 0, by definition of 0!)

    Raises:
        ValueError: if n is negative.  (The original recursed without a
        guard, so negative input recursed until RecursionError; an
        iterative loop also avoids the recursion limit for large n.)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def sort_table(matcher_type, matcher_map):
    """Return the sorted HTML table for the given row map."""
    rows = "".join(matcher_map[key] + '\n' for key in sorted(matcher_map))
    return ('<!-- START_%(type)s_MATCHERS -->\n' +
            '%(table)s' +
            '<!--END_%(type)s_MATCHERS -->') % {
        'type': matcher_type,
        'table': rows,
    }
def get_threshold(max_ep, threshold):
    """
    Return the actual threshold value: *threshold* percent of *max_ep*,
    rounded to the nearest integer.
    """
    one_percent = max_ep / 100
    return round(one_percent * threshold)
def to_chr(x):
    """Return the character whose code point is *x*.

    BUG FIX: the original formatted a *decimal* number into a ``\\u`` hex
    escape and eval'd it, which produced the wrong character (or a
    SyntaxError) for x > 127; it also used ``and/or`` short-circuiting and
    ``eval``.  ``chr`` handles the whole Unicode range directly and is
    identical for 0 < x < 128.
    """
    return chr(x)
def _collection_spec(collection=None, revision=None) -> str:
"""
Return a template string for a collection/revision regular expression. Because both are
optional in the ALF spec, None will match any (including absent), while an empty string will
match absent.
Parameters
----------
collection : None, str
An optional collection regular expression
revision : None, str
An optional revision regular expression
Returns
-------
str
A string format for matching the collection/revision
"""
spec = ''
for value, default in zip((collection, revision), ('{collection}/', '#{revision}#/')):
if not value:
default = f'({default})?' if value is None else ''
spec += default
return spec |
def combineListResponces(responces):
    """Combine a list of device or datanode responces: concatenate all
    content items, skipping None entries."""
    return [
        item
        for responce in responces
        if responce is not None
        for item in responce.get("content", {}).get("items", [])
    ]
def tabulated_fibonacci(n):
    """Returns the nth fibonacci number.

    Uses two rolling variables instead of a full table, so it runs in
    O(n) time and O(1) space.

    Parameters
    ----------
    n : int
        the nth fibonacci position

    Returns
    -------
    int
        the nth fibonacci number

    >>> tabulated_fibonacci(0)
    0
    >>> tabulated_fibonacci(1)
    1
    >>> tabulated_fibonacci(3)
    2
    """
    if n < 0:
        raise ValueError("Value cannot be negative")
    prev, cur = 0, 1
    for _ in range(n):
        prev, cur = cur, prev + cur
    return prev
def basename(file_name):
    """
    Strip the final extension from file_name.
    `basename("test.e") -> "test"`; a name without '.' yields "".
    """
    stem, _sep, _ext = file_name.rpartition(".")
    return stem
def shifted_list(l):
    """
    Return the shifted (normalized) list of list l.
    As implemented, element i maps to l[i] - l[0] + 1, so the result
    starts with 1 and ends with last(l) - l[0] + 1.
    NOTE(review): the original docstring claimed the result starts with 0,
    which contradicts the '+1' below — confirm which is intended.
    """
    return [l[i] - l[0]+1 for i in range(len(l))]
def fill_kwargs(kwargs):
    """Return *kwargs* merged over the default option set (a new dict)."""
    return {
        "strandedness": None,
        "overlap": True,
        "how": None,
        "invert": None,
        "new_pos": None,
        "suffixes": ["_a", "_b"],
        "suffix": "_b",
        "sparse": {
            "self": False,
            "other": False
        },
        **kwargs,
    }
def get_overlap_score(candidate, target):
    """Return the overlap score between a candidate and a target word.

    The shorter word is shrunk from its end until it appears inside the
    longer one; the score is that prefix's length over the longer word's
    length (0.0 when no prefix of length >= 2 matches).

    Parameters
    ----------
    candidate : str
        Candidate word whose overlap has to be detected.
    target : str
        Target word against which the overlap will be detected

    Returns
    -------
    float
        Overlap score between candidate and the target.
    """
    # Ensure `candidate` is the longer of the two.
    if len(candidate) < len(target):
        candidate, target = target, candidate
    probe = target
    while len(probe) >= 2:
        if probe in candidate:
            return len(probe) * 1.0 / len(candidate)
        probe = probe[:-1]
    return 0.0
def locate_min(a):
    """
    Get the list of indexes of all minimum-value elements of a.

    :param a: Iterable
    :return: List of indexes
    """
    smallest = min(a)
    positions = []
    for idx, element in enumerate(a):
        if element == smallest:
            positions.append(idx)
    return positions
def mappingSightingPatternSTIX(etype):
    """
    Map a threat-sighting entity type to its STIX 2.1 pattern path.
    Raises KeyError for unknown types.
    """
    mapping = dict(
        sha256="file:hashes.'SHA-256'",
        ipv4="ipv4-addr:value",
        domain="domain-name:value",
        url="url:value",
        dstHost="domain-name:value",
        md5="file:hashes.md5",
        sha1="file:hashes.'SHA-1'",
        ipv6="ipv6-addr:value",
        file="file:name",
        name="file:name",
        # Paths are expressed like C:\\\\Windows
        path="file:parent_directory_ref.path",
        key="windows-registry-key:key",
    )
    return mapping[etype]
def parse_cli_output(output):
    """ helper for testing:
    parse the CLI --list output and return the value of all set
    attributes as a dict """
    import re
    pattern = re.compile(r"^(\w+)\s+.*\=\s+(.*)$", re.MULTILINE)
    return {key: value for key, value in pattern.findall(output)}
def bboxMargin(a):
    """
    Box margin: width plus height of box a = (x0, y0, x1, y1).

    :param a: bounding box tuple
    :return: (x1 - x0) + (y1 - y0)
    """
    width = a[2] - a[0]
    height = a[3] - a[1]
    return width + height
def num_over_limit(n, limit):
    """
    Return how many of the binomial coefficients n C r (r = 0..n) are
    greater than *limit*.

    BUG FIX: the original used Python-2 ``/`` integer division, so under
    Python 3 ``range(n / 2 - 1)`` raised TypeError (float argument); all
    divisions are now explicit floor divisions (exact here, since each
    partial product is itself a binomial coefficient).
    """
    if n == 1:
        # Row 1 is just (1, 1).
        return 2 if 1 > limit else 0
    half = n // 2
    prod = 1
    # Walk the rising half of Pascal's row; by symmetry, once prod exceeds
    # the limit, every coefficient closer to the middle does too.
    for r in range(half - 1):
        if prod > limit:
            return (half - r) * 2 + (n % 2) + 1
        prod = prod * (n - r) // (r + 1)
    if prod > limit:
        return 2 + (n % 2) + 1
    # Final step to the central coefficient n C (n//2).
    prod = prod * (n - half + 1) // half
    if prod > limit:
        return (n % 2) + 1
    return 0
def readPairInParen(string, startPos):
    """Read a pair of numbers in parentheses, e.g. "(3413.55, 4103.456)".

    BUG FIX: the original sliced to ``commaLoc-1`` and ``stopParen-1``,
    silently dropping the last digit of each number; the slice end is
    already exclusive, so no ``-1`` is needed.

    Args:
        string: text to scan.
        startPos: index at which to start looking for '('.

    Returns:
        (num1, num2) as floats.
    """
    # Find the bounds.
    startParen = string.find('(', startPos)
    commaLoc = string.find(',', startParen)
    stopParen = string.find(')', commaLoc)
    # Extract the numbers (slice ends are exclusive).
    num1 = float(string[startParen + 1:commaLoc])
    num2 = float(string[commaLoc + 1:stopParen])
    return (num1, num2)
def m(o, name, case_insensitive=True):
    """Return the members of the object or dict whose names contain *name*."""
    members = o.keys() if isinstance(o, dict) else dir(o)
    if case_insensitive:
        needle = name.lower()
        return [member for member in members if needle in member.lower()]
    return [member for member in members if name in member]
def assert_axis_in_bounds(axis: int, ndim: int) -> int:
    """Assert a given axis index is inside the existing axes of the image.

    Returns
    -------
    axis : int
        The validated axis, normalised to the non-negative range [0, ndim).

    Raises
    ------
    ValueError
        The given axis index is out of bounds.
    """
    if -ndim <= axis < ndim:
        return axis % ndim
    msg = (
        f'Axis {axis} not defined for dimensionality {ndim}. '
        f'Must be in [{-ndim}, {ndim}).'
    )
    raise ValueError(msg)
def valid_netbios_name(name):
    """Check whether a name is valid as a NetBIOS name.
    See crh's book (1.4.1.1): at most 15 chars, alphanumerics plus a
    limited punctuation set.
    """
    if len(name) > 15:
        return False
    return all(ch.isalnum() or ch in " !#$%&'()-.@^_{}~" for ch in name)
def average_lines(lines , Avg_ux , Avg_uy , Avg_lx , Avg_ly):
    """
    Average the endpoints of a set of line segments.

    Each line is (fx, fy, sx, sy); the endpoint with the larger y value is
    treated as the "lower" point (image coordinates).  Returns the
    integer-averaged [lower_x, lower_y, upper_x, upper_y].

    BUG FIX: the original's else-branch assigned point1 = (fx, fy) in both
    branches, so the lower/upper selection never actually happened.

    NOTE(review): the Avg_* parameters feed a commented-out moving-average
    scheme and are currently unused (kept for interface compatibility);
    raises ZeroDivisionError for empty *lines*, unchanged from the original.
    """
    sum_lx = sum_ly = 0
    sum_ux = sum_uy = 0
    for fx, fy, sx, sy in lines:
        # Pick the endpoint with the larger y as the lower point.
        if fy > sy:
            lower, upper = (fx, fy), (sx, sy)
        else:
            lower, upper = (sx, sy), (fx, fy)
        sum_lx += lower[0]
        sum_ly += lower[1]
        sum_ux += upper[0]
        sum_uy += upper[1]
    # A moving average (e.g. 0.9*Avg + 0.1*avg) would smooth detection but
    # needs bias correction for the zero-initialised Avg_* values.
    count = len(lines)
    return [sum_lx // count, sum_ly // count, sum_ux // count, sum_uy // count]
def longestPalindrome(s):
    """
    Return the longest palindromic substring of s.

    :type s: str
    :rtype: str
    """
    ## Method 1: brute-force search with pruning.  O(n^2) candidate pairs,
    ## each checked recursively; fails time limits on cases 101/103.
    def sub_judge(start, end, len):  # NOTE: `len` shadows the builtin inside this helper
        # Returns the palindrome length accumulated so far, or -1 when
        # s[start:end+1] is not a palindrome.
        if start > end:
            return len
        if start == end:
            return len + 1
        if s[start] == s[end]:
            return sub_judge(start + 1, end - 1, len + 2)
        else:
            return -1
    max_len = 0
    max_i = 0
    max_j = 0
    for i in range(len(s)):
        for j in reversed(range(i, len(s))):
            if j - i + 1 < max_len: ## prune: candidate shorter than best so far
                break
            else:
                if sub_judge(i, j, 0) > max_len:
                    max_len = sub_judge(i, j, 0)
                    max_i = i
                    max_j = j
        if len(s) - i - 1 <= max_len: ## prune: no later start can beat the best
            return s[max_i:max_j + 1]
    return s[max_i:max_j + 1]
def islower(bb1, bb2):
    """ Return true if obj 1 is lower than obj 2.

    bb1 and bb2 are (min_corner, max_corner) pairs of (x, y, z) points.
    Obj 1 is lower when the top (z of its max corner) is below the bottom
    (z of obj 2's min corner).
    """
    _, (_, _, top1) = bb1
    (_, _, bottom2), _ = bb2
    return top1 < bottom2
def is_feasible(params, test_fixtures):
    """
    Checks if the specified parameter and test fixture combination is feasible.
    A combination is feasible if none of the test fixture resources appear in
    the parameters and if all of the exclusive-use test fixture resources are
    only used by one test fixture.
    """
    exclusive_tf_resources = []
    shared_tf_resources = set()
    # Gather every (resource, exclusive-flag) pair from all non-None
    # fixtures, skipping None resources.
    for r, ex in [(r, ex) for tf in test_fixtures if not tf is None for r, ex in tf.get_resources() if not r is None]:
        if ex:
            exclusive_tf_resources.append(r)
        else:
            shared_tf_resources.add(r)
    # A duplicate among the exclusives (or an exclusive that also appears
    # shared) makes the concatenated list longer than the union.
    if len(exclusive_tf_resources + list(shared_tf_resources)) > len(set(exclusive_tf_resources).union(shared_tf_resources)):
        return False # At least one exclusive-use resource is used twice in the test fixtures
    if len(set(exclusive_tf_resources).union(shared_tf_resources).intersection(params)):
        return False # At least one test fixture resource appears in the params too
    # NOTE(review): this check is already covered by the union test above.
    if len(shared_tf_resources.intersection(params)):
        return False # At least one test fixture resource appears in the params too
    return True
def check_month(month_number, month_list):
    """
    Check if a month (as integer) is in a list of selected months (as strings).

    Args:
        month_number: The number of the month (1-12).
        month_list: A list of months as defined by the configuration.

    Returns:
        Bool.
    """
    names = (
        'january', 'february', 'march', 'april', 'may', 'june',
        'july', 'august', 'september', 'october', 'november', 'december',
    )
    month_map = {number: name for number, name in enumerate(names, start=1)}
    # Unknown month numbers map to '' and therefore normally to False.
    return month_map.get(month_number, '') in month_list
def key_with_max_val(d):
    """
    Return the key of *d* whose value is largest (the first such key in
    insertion order on ties).
    """
    return max(d, key=d.get)
def mix_probability_to_independent_component_probability(mix_probability: float, n: float) -> float:
    """Convert a full-mixing-channel probability into per-component probabilities.

    Applying each component independently with the returned probability has
    the same overall effect as, with probability `mix_probability`,
    uniformly picking one component to apply.  Note that, unlike elsewhere
    in the code, the all-identity case is one of the pickable components.
    """
    residual = (1 - mix_probability) ** (1 / 2 ** (n - 1))
    return 0.5 - 0.5 * residual
def test_path_and_query_parameters(
    arg1,
    arg2,
):
    """
    Use same arg name as the one in path for receiving path args.
    For those args whose names do not match path arg names, they will be
    parsed as query parameters.  Returns both back as a dict.
    ```python
    from django_mini_fastapi import Path
    @api.get('/test_path_and_query_parameters/{arg1}')
    def test_path_and_query_parameters(arg1, arg2):
        return dict(arg1=arg1, arg2=arg2)
    ```
    """
    return dict(arg1=arg1, arg2=arg2)
def dispatch_across_consumers(products, consumers, rank, even=False):
    """Dispatch products across all consumers.

    The first ``products % consumers`` ranks take one extra product.

    Args:
        products: number of products to be dispatched.
        consumers: number of consumers.
        rank: rank of this consumer in all consumers.
        even: dispatch across consumers evenly if True.
    """
    if even:
        assert products % consumers == 0, \
            "Number of products expects to be divided by number of consumers" \
            ". while products = {}, consumers = {}.".format(products, consumers)
    base, extra = divmod(products, consumers)
    return base + 1 if rank < extra else base
def _IsExtraneousLine(line, send_cmd):
"""Determine if a line read from stdout in persistent shell is extraneous.
The results output to stdout by the persistent shell process
(in PersistentShell below) often include "extraneous" lines that are
not part of the output of the shell command. These "extraneous" lines
do not always appear and are of two forms: shell prompt lines and lines
that just duplicate what the input command was. This function
detects these extraneous lines. Since all these lines have the
original command in them, that is what it detects ror.
Args:
line: Output line to check.
send_cmd: Command that was sent to adb persistent shell.
"""
return send_cmd.rstrip() in line |
def calculate_score(s1, s2, l1, l2, startpoint):
    """Score the alignment of s2 against s1 at offset *startpoint*,
    printing a formatted trace; returns the number of matching bases."""
    match_marks = ""  # visualises the alignment: '*' match, '-' mismatch
    score = 0
    for offset in range(l2):
        pos = offset + startpoint
        if pos < l1:
            if s1[pos] == s2[offset]:
                match_marks += "*"
                score += 1
            else:
                match_marks += "-"
    # Formatted output of the alignment.
    print("." * startpoint + match_marks)
    print("." * startpoint + s2)
    print(s1)
    print(score)
    return score
def valueForKeyPath(dict, keypath, default = None):
    """
    Resolve a dotted *keypath* against nested dictionaries, returning
    *default* when any segment is missing.
    """
    node = dict
    for segment in keypath.split('.'):
        if segment not in node:
            return default
        node = node[segment]
    return node
def get_dataset_json(met, version):
    """Generate the HySDS dataset JSON from the met JSON."""
    dataset = {"version": version}
    dataset["label"] = met['data_product_name']
    dataset["starttime"] = met['sensingStart']
    dataset["endtime"] = met['sensingStop']
    return dataset
def is_span(node: dict) -> bool:
    """Check whether a node is a span node.

    A span is a plain string, an object with a ``marks`` attribute, or a
    dict whose '_type' is 'span'.

    BUG FIX: the original evaluated ``node.get(...)`` first, raising
    AttributeError for plain strings (which the docstring says are spans);
    the cheap type checks now run before the dict lookup.
    """
    if isinstance(node, str) or hasattr(node, 'marks'):
        return True
    return isinstance(node, dict) and node.get('_type', '') == 'span'
def format_size(size):
    """
    Return a human-readable value for the `size` int or float.
    For example:
    >>> assert format_size(0) == '0 Byte'
    >>> assert format_size(1) == '1 Byte'
    >>> assert format_size(0.123) == '0.1 Byte'
    >>> assert format_size(123) == '123 Bytes'
    >>> assert format_size(1024) == '1 KB'
    >>> assert format_size(2567) == '2.51 KB'
    >>> assert format_size(1024*1024*1024) == '1 GB'
    """
    if not size:
        return '0 Byte'
    if size < 1:
        return f'{size:.1f} Byte'
    if size == 1:
        return f'{int(size)} Byte'
    size = float(size)
    for symbol in ('Bytes', 'KB', 'MB', 'GB', 'TB'):
        if size < 1024:
            # Show whole numbers without decimals.
            if size.is_integer():
                return f'{int(size)} {symbol}'
            return f'{size:.2f} {symbol}'
        size = size / 1024.
    # Anything past TB is still reported in TB.
    return f'{size:.2f} {symbol}'
def pp(n, p, l, t):
    """calculate pp,qq for SBM given p from ER
    p : p from G(n,p)
    l : # of communities
    t : ratio between qq and pp (qq = t * pp below; the original comment
        said "pp/qq" — NOTE(review): confirm which direction is intended)
    """
    denom = n ** 2 / l - n + t * n ** 2 * (l - 1) / l
    intra = p * n * (n - 1) / denom
    inter = t * intra
    return intra, inter
def case_convert(snakecase_string: str) -> str:
    """Convert a snake_case string to space-separated Title Case, upcasing "Cnn" to "CNN".

    Args:
        snakecase_string (str): snakecase string

    Returns:
        str: converted string
    """
    spaced = snakecase_string.replace("_", " ")
    return spaced.title().replace("Cnn", "CNN")
def mergesort(items):
    """Sort a list with a top-down, stable merge sort.

    Args:
        items: A list of ints to be sorted from least to greatest
    Returns:
        A list of the same ints in ascending order (input not mutated
        beyond slicing).
    """
    # Base case: zero or one element is already sorted.
    if len(items) <= 1:
        return items
    middle = len(items) // 2
    left_half = mergesort(items[:middle])
    right_half = mergesort(items[middle:])
    # Merge the two sorted halves, preferring the left on ties (stable).
    result = []
    i = j = 0
    while i < len(left_half) and j < len(right_half):
        if right_half[j] < left_half[i]:
            result.append(right_half[j])
            j += 1
        else:
            result.append(left_half[i])
            i += 1
    result.extend(left_half[i:])
    result.extend(right_half[j:])
    return result
def delist_arguments(args):
    """
    Replace every single-item list value in *args* with its lone element,
    mutating the dict in place, and return it.
    In other words, {'foo': ['bar']} becomes {'foo': 'bar'}.
    """
    # Iterate over keys so values can be reassigned during the walk.
    for key in args:
        current = args[key]
        if len(current) == 1:
            args[key] = current[0]
    return args
def cocktail_shaker_sort(unsorted):
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.

    Sorts *unsorted* in place (ascending) and returns it.

    Fix: the original only returned from inside the loop when a pass made
    no swaps, so it returned None for inputs whose final pass still
    swapped (e.g. [2, 1]) and for the empty list.
    """
    for i in range(len(unsorted)-1, 0, -1):
        swapped = False
        # Backward pass: bubble the smallest of unsorted[0:i+1] to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j-1]:
                unsorted[j], unsorted[j-1] = unsorted[j-1], unsorted[j]
                swapped = True
        # Forward pass: bubble the largest of unsorted[0:i+1] toward index i.
        for j in range(i):
            if unsorted[j] > unsorted[j+1]:
                unsorted[j], unsorted[j+1] = unsorted[j+1], unsorted[j]
                swapped = True
        if not swapped:
            return unsorted
    # Reached when every pass swapped (or the list had <= 1 element).
    return unsorted
def count_negatives(nums):
    """Return the number of negative numbers in the given list.
    >>> count_negatives([5, -1, -2, 0, 3])
    2
    """
    # Count directly: the original appended 0 to the caller's list (a
    # visible side effect) and sorted it (O(n log n)) just to find the
    # index of 0.  A generator-sum is O(n) and leaves the input untouched.
    return sum(1 for n in nums if n < 0)
def create_stairwaytotravel_url(place_id):
    """Create a url to a place page given the place id."""
    return f"https://stairwaytotravel.com/explore/{place_id}"
def merge_population(population):
    """
    Concatenate the 'feasible' and 'infeasible' sub-populations of
    *population* into a single merged list (feasible first).
    """
    feasible = population["feasible"]
    infeasible = population["infeasible"]
    return feasible + infeasible
def hash_int_pair(ind1, ind2):
    """Hash an ordered int pair into a single int.

    Args:
        ind1: smaller int of the pair.
        ind2: larger int of the pair (requires ind1 <= ind2).
    Returns:
        The combined hash index ind1 * (2**31 - 1) + ind2.
    """
    assert ind1 <= ind2
    multiplier = 2147483647  # 2**31 - 1, a Mersenne prime
    return ind1 * multiplier + ind2
def _auth_callback(userid, request):
""" Get permissions for a userid """
return ['default'] |
def non_negative_validation(value):
    """
    Validate that *value* (a number or a list of numbers) is non-negative.

    Returns the value unchanged when valid; raises ValueError otherwise.
    """
    if isinstance(value, list):
        # Any single negative entry invalidates the whole list.
        if any(item < 0 for item in value):
            raise ValueError("The Values in the list must not be negative")
        return value
    if value < 0:
        raise ValueError("The Value must not be negative.")
    return value
def _align_token_list(text, token_list, char_offset=0):
"""Align list of string tokens to text and return list of Token objects."""
token_spans = []
for text_token in token_list:
start = text.index(text_token, char_offset)
token_spans.append((start, start + len(text_token)))
char_offset = start + len(text_token)
return char_offset, token_spans |
def cria_posicao(col, ln):  # str x str -> posicao
    """Create a board position from column and row characters.

    Args:
        col: column letter, one of 'a', 'b' or 'c'.
        ln: row character, one of '1', '2' or '3'.
    Returns:
        A tuple (column_index, row_index), both ints in 0..2, representing
        the board position.
    Raises:
        ValueError: if either argument is invalid.
    """
    # Lookup tables replace the original twin if/elif ladders; the error
    # string is kept byte-identical for callers that match on it.
    columns = {'a': 0, 'b': 1, 'c': 2}
    rows = {'1': 0, '2': 1, '3': 2}
    if col not in columns or ln not in rows:
        raise ValueError('cria_posicao: argumentos invalidos')
    return columns[col], rows[ln]
def normal2SD(x,y,z):
    """Converts a normal vector to a plane (given as x,y,z)
    to a strike and dip of the plane using the Right-Hand-Rule.
    Input:
        x: The x-component of the normal vector
        y: The y-component of the normal vector
        z: The z-component of the normal vector
    Output:
        strike: The strike of the plane, in degrees clockwise from north
        dip: The dip of the plane, in degrees downward from horizontal
    """
    from math import asin, atan2, sqrt, degrees
    # Due to geologic conventions, positive angles are downwards
    z = -z
    # First convert the normal vector to spherical coordinates
    #  (This is effectively a plunge/bearing of the normal vector)
    # NOTE(review): the zero vector (r == 0) raises ZeroDivisionError here.
    r = sqrt(x*x + y*y + z*z)
    plunge = degrees(asin(z/r))
    bearing = degrees(atan2(y, x))
    # Rotate bearing so that 0 is north instead of east
    bearing = 90-bearing
    if bearing<0: bearing += 360
    # If the plunge angle is upwards, get the opposite end of the line
    # (flip both plunge and bearing to the downward-pointing direction)
    if plunge<0:
        plunge = -plunge
        bearing -= 180
        if bearing<0:
            bearing += 360
    # Now convert the plunge/bearing of the pole to the plane that it represents
    # (the pole is perpendicular to the plane, hence the 90-degree offsets)
    strike = bearing+90
    dip = 90-plunge
    # Wrap strike back into range; NOTE(review): a strike of exactly 360 is
    # left as 360 rather than 0 because the test is strict (>) — confirm
    # whether that convention is intended.
    if strike > 360: strike -= 360
    return strike, dip
def process_group(grp):
    """
    Execute a list of instructions with commands `acc`, `jmp`, or `nop`
    and int operands (e.g. "acc +1", "jmp -2").  The program is known to
    loop forever; stop just before any instruction would run a second
    time.

    :param grp: list of instruction strings, "<cmd> <signed int>".
    :return: the accumulator value at the moment the first repeated
        instruction index is reached.  (The original docstring promised
        the repeated index too, but only the accumulator was ever
        returned; the docstring is corrected here.)
    """
    accumulator = 0
    i = 0
    seen = set()
    # Loop until an instruction index repeats; also removes the original
    # unreachable `return` that followed a `while True`.
    while i not in seen:
        seen.add(i)
        instruction, operand = grp[i].split(" ")
        operand = int(operand)
        if instruction == "acc":
            accumulator += operand
            i += 1
        elif instruction == "jmp":
            i += operand
        else:  # "nop"
            i += 1
    return accumulator
def quick_clean(raw_str):
    """
    Normalize TeX-style quote pairs in a string.

    args:
        - raw_str: a string to be quickly cleaned
    return
        - the string with every closing ('') and opening (``) quote pair
          replaced by a double-quote character followed by a space
    """
    without_closing = raw_str.replace("''", '" ')
    return without_closing.replace("``", '" ')
def get_border_bounding_rect(h, w, p1, p2, r):
    """Expand rect (p1, p2) by a border of r and clamp it to the image.

    # Arguments
        h: image max height.
        w: image max width.
        p1: (x, y) start point of rect.
        p2: (x, y) end point of rect.
        r: border radius.
    # Returns
        (x1, y1, x2, y2) of the clamped rect; the far edges get an extra
        +1 so they are exclusive bounds.
    """
    left, top = p1
    right, bottom = p2
    # Grow by r on every side, then clamp into [0, w) x [0, h).
    left = max(left - r, 0)
    top = max(top - r, 0)
    right = min(right + r + 1, w)
    bottom = min(bottom + r + 1, h)
    return left, top, right, bottom
def sort_big_file(lines):
    """ 10.6 Sort Big File: sort a file far larger than available memory.

    Approach (external bucket sort): stream the lines once, grouping each
    into a bucket keyed by its first two characters.  Each bucket is then
    small enough to sort in memory, and concatenating the buckets in key
    order yields the fully sorted output.
    """
    buckets = {}
    # Single pass: group lines by their two-character prefix.
    for line in lines:
        buckets.setdefault(line[:2], []).append(line)
    # Sort each bucket in memory and emit the buckets in prefix order.
    result = []
    for prefix in sorted(buckets):
        result.extend(sorted(buckets[prefix]))
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.