content
stringlengths 42
6.51k
|
|---|
def input_param(name, value):
    """Echo a parameter's name and value for inspection, then pass the value through.

    :param name: parameter name.
    :param value: parameter value.
    :return: the unchanged value.
    """
    message = '{} = {}'.format(name, value)
    print(message)
    return value
|
def IsValidKeyForZeroQuery(key):
    """Returns if the key is valid for zero query trigger.

    A key qualifies only when it contains at least one non-ASCII character.
    """
    return any(ord(ch) >= 128 for ch in key)
|
def inverse_reframe_curves(raw_data, position_of_base_origin):
    """Invert a frame-of-reference shift on a list of 3D points, in place.

    :param raw_data: mutable sequence of [x, y, z] points (mutated in place).
    :param position_of_base_origin: position of the old origin expressed in
        the new frame of reference.
    :return: raw_data, after the origin offset has been subtracted.
    """
    off_x = position_of_base_origin[0]
    off_y = position_of_base_origin[1]
    off_z = position_of_base_origin[2]
    for point in raw_data:
        point[0] -= off_x
        point[1] -= off_y
        point[2] -= off_z
    return raw_data
|
def is_valid_host(ip):
    """Roughly validate an IPv4 dotted-quad address string.

    :param ip: candidate address such as ``'192.168.0.1'``.
    :return: True if *ip* has exactly four numeric octets each in [0, 255].
    """
    try:
        parts = [int(part) for part in ip.split('.')]
    except ValueError:
        # Non-numeric or empty octet (e.g. 'abc', '1..2') -> not valid,
        # instead of propagating ValueError as the original did.
        return False
    return len(parts) == 4 and all(0 <= p < 256 for p in parts)
|
def process(max_slices, no_of_types, slices_list):
    """
    Greedily choose pizzas whose slice counts sum as close as possible to
    (without exceeding) max_slices.
    Args:
        max_slices: Maximum number of slices allowed
        no_of_types: Number of Pizza to be selected
        slices_list: List of number of slices of each pizza
            (assumed sorted ascending -- largest last; TODO confirm with caller)
    Returns:
        total number of slices
        , the list of the types of pizza to order (as indices into slices_list)
    """
    # Best total and best index combination seen so far.
    global_slices_sum = 0
    global_slices_ordered = []
    # Check each pizza from the most slices to the least
    for pizza_idx in range(1, len(slices_list) + 1):
        slices_sum = 0
        slices_ordered = []
        # Greedy pass: starting at the pizza_idx-th largest pizza, walk down
        # and take every pizza that still fits under max_slices.
        for slice_idx in range(len(slices_list) - pizza_idx, -1, -1) :
            if slices_sum + slices_list[slice_idx] > max_slices:
                continue # skip if over the max
            slices_sum += slices_list[slice_idx]
            slices_ordered.insert(0, slice_idx)
            if slices_sum == max_slices:
                break # stop when max is reached
        if slices_sum > global_slices_sum:
            global_slices_sum = slices_sum
            global_slices_ordered = slices_ordered.copy()
            if global_slices_sum == max_slices:
                break # stop when max is reached
        # Remove the last one to select another combination: backtrack by
        # dropping the smallest chosen pizza and retrying with smaller ones.
        while len(slices_ordered) > 0 and global_slices_sum < max_slices:
            last_idx = slices_ordered[0]
            slices_sum -= slices_list[last_idx]
            slices_ordered = slices_ordered[1:]
            # Refill with pizzas smaller than the one just removed.
            for slice_idx in range(last_idx - 1, -1, -1):
                if slices_sum + slices_list[slice_idx] > max_slices:
                    continue # skip if over the max
                slices_sum += slices_list[slice_idx]
                slices_ordered.insert(0, slice_idx)
                if slices_sum == max_slices:
                    break
            if slices_sum > global_slices_sum:
                global_slices_sum = slices_sum
                global_slices_ordered = slices_ordered.copy()
                if global_slices_sum == max_slices:
                    break
    return global_slices_sum, global_slices_ordered
|
def to_line_equation(coefs, p):
    """Evaluate the line equation A*x + B*y + C at a point.

    :param coefs: iterable unpackable into the three coefficients (A, B, C)
    :param p: point given as a complex number (p.real = x, p.imag = y)
    :return: A * p.real + B * p.imag + C
    """
    a_coef, b_coef, c_coef = coefs
    return a_coef * p.real + b_coef * p.imag + c_coef
|
def justify_center(content: str, width: int, symbol: str) -> str:
    """Center every line of *content* within *width* columns of *symbol*.

    Parameters
    ----------
    content : string
        The (possibly multi-line) string to be centered.
    width : integer
        Width of the column in characters within which each line is centered.
    symbol : string
        Single-character filler placed on both sides of each line.

    Returns
    -------
    string
        Same number of lines as *content*, each centered.
    """
    centered = [line.center(width, symbol) for line in content.split("\n")]
    return "\n".join(centered)
|
def is_numeric_port(portstr):
    """return: integer port (== True) iff portstr is a valid port number,
    False otherwise
    """
    if not portstr.isdigit():
        return False
    port = int(portstr)
    # Valid TCP/UDP ports are 1..65535 (65536 == 2**16 is excluded).
    return port if 0 < port < 65536 else False
|
def complement(x):
    """Return the 8-complement of a single digit character.

    Helper for reverse(rule) and unstrobe(rule): x is a one-character
    string holding a digit, and the result is also a character.
    """
    return f"{8 - int(x)}"
|
def test_while_reassign(obj1, obj2, obj3, obj4):
    """
    >>> test_while_reassign(*values[:4])
    (9L, Value(1), 2L, 5L)
    >>> sig, syms = infer(test_while_reassign.py_func,
    ... functype(None, [object_] * 4))
    >>> types(syms, 'obj1', 'obj2', 'obj3', 'obj4')
    (object_, object_, int, int)
    """
    # Loop runs 10 times; obj1 ends up rebound to 9.
    i = 0
    while i < 10:
        obj1 = i
        i += 1
    # Condition is false immediately, so obj2 keeps its argument value.
    i = 0
    while i < 0:
        obj2 = i
        i += 1
    # Loop completes without break, so the else clause runs and overwrites.
    i = 0
    while i < 10:
        obj3 = i
        i += 1
    else:
        obj3 = 2  # This definition kills any previous definition
    # break on the first iteration skips the else clause, so obj4 stays 5.
    i = 5
    while i < 10:
        obj4 = i
        i += 1
        break
    else:
        obj4 = 0
    return obj1, obj2, obj3, obj4
|
def _path(path):
"""Helper to build an OWFS path from a list"""
path = "/" + "/".join(str(x) for x in path)
return path.encode("utf-8") + b"\0"
|
def writeADESHeader(
    observatory_code,
    submitter,
    telescope_design,
    telescope_aperture,
    telescope_detector,
    observers,
    measurers,
    observatory_name=None,
    submitter_institution=None,
    telescope_name=None,
    telescope_fratio=None,
    comment=None
):
    """
    Write the ADES PSV headers.
    Parameters
    ----------
    observatory_code : str
        MPC-assigned observatory code
    submitter : str
        Submitter's name.
    telescope_design : str
        Telescope's design, eg. Reflector.
    telescope_aperture : str
        Telescope's primary aperture in meters.
    telescope_detector : str
        Telescope's detector, eg. CCD.
    observers : list of str
        First initial and last name (J. Smith) of each of the observers.
    measurers : list of str
        First initial and last name (J. Smith) of each of the measurers.
    observatory_name : str, optional
        Observatory's name.
    submitter_institution : str, optional
        Name of submitter's institution.
    telescope_name : str, optional
        Telescope's name.
    telescope_fratio : str, optional
        Telescope's focal ratio.
    comment : str, optional
        Additional comment to add to the ADES header.
    Returns
    -------
    list : str
        A list of each line in the ADES header, newline-terminated.
    Raises
    ------
    ValueError
        If observers or measurers is not a list.
    """
    # Validate list arguments up front so no partial header is built
    # before an error is raised (the original validated mid-construction
    # and used `type(x) is not list` instead of isinstance).
    if not isinstance(observers, list):
        raise ValueError("observers should be a list of strings.")
    if not isinstance(measurers, list):
        raise ValueError("measurers should be a list of strings.")
    # Start header with version number
    header = ["# version=2017"]
    # Add observatory [required]
    header += ["# observatory", f"! mpcCode {observatory_code}"]
    if observatory_name is not None:
        header += [f"! name {observatory_name}"]
    # Add submitter [required]
    header += ["# submitter", f"! name {submitter}"]
    if submitter_institution is not None:
        header += [f"! institution {submitter_institution}"]
    # Add telescope details [required]
    header += ["# telescope"]
    if telescope_name is not None:
        header += [f"! name {telescope_name}"]
    header += [
        f"! design {telescope_design}",
        f"! aperture {telescope_aperture}",
        f"! detector {telescope_detector}",
    ]
    if telescope_fratio is not None:
        header += [f"! fRatio {telescope_fratio}"]
    # Add observer and measurer details
    header += ["# observers"]
    header += [f"! name {name}" for name in observers]
    header += ["# measurers"]
    header += [f"! name {name}" for name in measurers]
    # Add optional comment
    if comment is not None:
        header += ["# comment", f"! line {comment}"]
    return [line + "\n" for line in header]
|
def get_tag_value(tags, key):
    """Get a specific Tag value from a list of Tags.

    :param tags: iterable of dicts with 'Key' and 'Value' entries.
    :param key: tag key to look up.
    :raises KeyError: when no tag matches.
    """
    for entry in tags:
        if entry['Key'] == key:
            return entry['Value']
    raise KeyError
|
def toBytesString(input):
    """Convert a unicode string to a latin-1 encoded bytes string."""
    # NOTE: parameter name shadows the builtin `input`; kept for
    # backward compatibility with keyword callers.
    return input.encode('latin-1')
|
def set_timeout(timeout=None):
    """Normalize a requests timeout value.

    :param timeout: optional timeout; falsy values fall back to the default.
    :return: int(timeout) when truthy, otherwise the 250 second default.
    """
    return int(timeout) if timeout else 250
|
def check_uniqueness_in_rows(board: list):
    """
    Check buildings of unique height in each row.
    Return True if buildings in a row have unique length, False otherwise.

    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*',
    ...     '*543215', '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*',
    ...     '*543215', '*35214*', '*41532*', '*2*1***'])
    False
    """
    # Only interior rows carry buildings; the first and last rows are hints.
    for row_idx in range(1, len(board) - 1):
        # Strip the hint characters at both ends of the row.
        interior = board[row_idx][1:-1]
        if len(set(interior)) != len(interior):
            return False
    return True
|
def _underscore_to_camelcase(value, cap_segment=None):
""" Converts underscore_separated string (aka joined_lower) into camelCase string.
>>> _underscore_to_camelcase('foo_bar_baz')
'FooBarBaz'
>>> _underscore_to_camelcase('foo_bar_baz', cap_segment=0)
'FOOBarBaz'
>>> _underscore_to_camelcase('foo_bar_baz', cap_segment=1)
'FooBARBaz'
>>> _underscore_to_camelcase('foo_bar_baz', cap_segment=1000)
'FooBarBaz'
"""
return "".join([s.title() if idx != cap_segment else s.upper() for idx, s in enumerate(value.split('_'))])
|
def cancel_job(job_id):
    """
    Cancel a queued job via the ``qdel`` command.
    :param job_id: int, job id
    :return: if success, return 1, else return 0
    """
    import subprocess
    try:
        step_process = subprocess.Popen(('qdel', str(job_id)), shell=False, stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
        stdout, _ = step_process.communicate()
        # The original returned 1 unconditionally; qdel signals failure
        # through a non-zero exit status, so honor it.
        return 1 if step_process.returncode == 0 else 0
    except Exception as e:
        print(e)
        return 0
|
def surface_margin_waste(A_approx_waste, A_real_waste):
    """
    Calculates the surface margin.
    Parameters
    ----------
    A_approx_waste : float
        The approximate heat transfer area, [m**2]
    A_real_waste : float
        The real heat transfer area, [m**2]
    Returns
    -------
    surface_margin : float
        The surface margin, [%]
    """
    area_difference = A_approx_waste - A_real_waste
    return area_difference * 100 / A_approx_waste
|
def filterCoins(coins):
    """Filter the cryptocurrency index down to coins worth fetching.

    Parameters:
        coins (lst[(str, str)]): Cryptocurrency Index
    Returns:
        lst[(str, str)]: coins whose symbol is not a stablecoin/wrapper.
    """
    unwanted = {'USDT', 'USDC', 'BUSD', 'UST', 'WBTC', 'DAI', 'CRO'}
    return [coin for coin in coins if coin[0] not in unwanted]
|
def get_direction(call):
    """
    :param call: represents a call; index 2 is the source floor and
        index 3 the destination floor.
    :return: elevator direction, UP = 1 or DOWN = -1.
    """
    return 1 if call[2] < call[3] else -1
|
def _belongs_to_one_of_these_classes(obj, classes):
"""
Returns true if the given object belongs to one of the classes cited
in the list of classes provided.
:param obj: The object to lib_test.
:param classes: The qualifying classes.
:return: Boolean.
"""
for cls in classes:
if isinstance(obj, cls):
return True
return False
|
def code_block(text, language=""):
    """Return a code block.
    If a language is specified a fenced code block is produced, otherwise the
    block is indented by four spaces.
    Keyword arguments:
    language -- Specifies the language to fence the code in (default blank).
    >>> code_block("This is a simple codeblock.")
    '    This is a simple codeblock.'
    >>> code_block("This block of code has a specified language.", "python")
    '```python\\nThis block of code has a specified language.\\n```'
    """
    if not language:
        indented = (f"    {line}" for line in text.split("\n"))
        return "\n".join(indented)
    return f"```{language}\n{text}\n```"
|
def to_digit(x):
    """Convert a string into an int or float when it represents one.

    Non-string inputs are returned unchanged; the empty string maps to
    None; strings that parse as neither int nor float are returned as-is.
    :param x: the input string to be converted
    :return: the result of the conversion
    """
    if not isinstance(x, str):
        return x
    if x == '':
        return None
    # Try the stricter parse first so '3' stays an int, not a float.
    for caster in (int, float):
        try:
            return caster(x)
        except ValueError:
            pass
    return x
|
def parse_hpo_disease(hpo_line):
    """Parse one line of the HPO disease association file.

    Args:
        hpo_line(str): a tab-separated line with the columns
            HPO-id, HPO label, entrez-gene-id, entrez-gene-symbol,
            additional info from G-D source, G-D source, disease-ID,
            e.g.
            HP:0000002  Abnormality of body height  3954  LETM1  -  mim2gene  OMIM:194190

    Returns:
        dict: with keys 'source', 'disease_nr', 'hgnc_symbol' and
        'hpo_term', or None for orphadata-sourced lines (skipped).
    """
    hpo_line = hpo_line.rstrip().split("\t")
    hpo_info = {}
    gd_source = hpo_line[5]  # mim2gene or orphadata
    if gd_source == "orphadata":
        return None
    # disease-ID looks like 'OMIM:194190' -> source prefix + numeric id.
    disease = hpo_line[6].split(":")
    hpo_info["source"] = disease[0]
    hpo_info["disease_nr"] = int(disease[1])
    hpo_info["hgnc_symbol"] = None
    hpo_info["hpo_term"] = None
    # Fixed off-by-one guards: index 3 needs at least 4 fields (the
    # original tested len >= 3 / len >= 4 respectively).
    if len(hpo_line) > 3:
        hpo_info["hgnc_symbol"] = hpo_line[3]
    if len(hpo_line) > 0:
        hpo_info["hpo_term"] = hpo_line[0]
    return hpo_info
|
def is_pythagorean_triplet(a, b, c):
    """Return True iff a < b < c and a^2 + b^2 = c^2.

    Example: 3, 4, 5 since 9 + 16 = 25.

    :param int a: first number
    :param int b: second number
    :param int c: third number
    :return bool: True if a, b, and c form a Pythagorean triplet
    :raises TypeError: when the inputs do not support the operations
    """
    try:
        # Short-circuits: squaring only happens when the ordering holds.
        return bool(a < b < c and a ** 2 + b ** 2 == c ** 2)
    except TypeError:
        raise TypeError("Input must be positive integers")
|
def value(card):
    """Return the numeric value of a card string as an integer 1..13.

    The last character is the suit; the prefix is either a face-card
    letter (A/J/Q/K) or a number.
    """
    rank = card[:-1]
    names = {'A': 1, 'J': 11, 'Q': 12, 'K': 13}
    return names[rank] if rank in names else int(rank)
|
def g(x):
    """Evaluate y = (1/3)x**3 - x."""
    cubed = x ** 3
    return cubed / 3 - x
|
def hex_to_64(hexstr):
    """Convert a hex string to a base64 string.

    Keyword arguments:
    hexstr -- the hex string we wish to convert
    """
    B64CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
    # Accumulator for bits not yet forming a full 6-bit group, and the
    # number of bits still missing from the current group.
    acc = 0
    missing = 6
    out = ''
    # Each hex digit contributes 4 bits; emit one base64 char per 6 bits.
    for digit in hexstr:
        nibble = int(digit, 16)
        if missing == 6:
            # Empty accumulator: keep all 4 bits, still 2 short of a group.
            acc = nibble
            missing = 2
        elif missing == 4:
            # 2 bits held: this nibble completes a 6-bit group exactly.
            out += B64CHARS[(acc << 4) | nibble]
            acc = 0
            missing = 6
        else:
            # 4 bits held: the top 2 bits finish a group, the bottom 2
            # carry over into the next one.
            out += B64CHARS[(acc << 2) | (nibble >> 2)]
            acc = nibble & 3
            missing = 4
    # Flush leftover bits zero-padded, plus one '=' per padding bit pair.
    if missing < 6:
        out += B64CHARS[acc << missing]
        out += '=' * (missing // 2)
    return out
|
def format_team_outputs(team: dict = None) -> dict:
    """Take GitHub API team data and format to expected context outputs.

    Args:
        team (dict): team data returned from GitHub API. Defaults to an
            empty dict (replaces the original mutable default argument).

    Returns:
        (dict): team object formatted to expected context outputs.
    """
    if team is None:
        team = {}
    # Map of output context key -> GitHub API field name.
    field_map = {
        'ID': 'id',
        'NodeID': 'node_id',
        'Name': 'name',
        'Slug': 'slug',
        'Description': 'description',
        'Privacy': 'privacy',
        'Permission': 'permission',
        'Parent': 'parent',
    }
    return {out_key: team.get(api_key) for out_key, api_key in field_map.items()}
|
def btc(amount_satoshis, units='BTC'):
    """Given an amount in satoshis return a display string in *units*."""
    divisors = {
        'BTC': 1e8,
        'mBTC': 1e5,
        'uBTC': 1e2,
        'bit': 1e2,
        'sat': 1,
    }
    amount = amount_satoshis / divisors[units]
    # Pluralize 'bit' when the amount is anything other than exactly 1.
    label = 'bits' if units == 'bit' and amount != 1 else units
    return '{} {}'.format(amount, label)
|
def get_ip_address_discovery_ports(app):
    """Extract the discovery port numbers from an app's ipAddress section.

    :return: list of ints (empty when no ipAddress data is present).
    """
    ip_address = app.get('ipAddress', {})
    if not ip_address:
        return []
    port_entries = ip_address.get('discovery', {}).get('ports', [])
    return [int(entry['number']) for entry in port_entries if 'number' in entry]
|
def mean(data):
    """Return the sample arithmetic mean of data.

    :raises ValueError: when data is empty.
    """
    count = len(data)
    if count < 1:
        raise ValueError('len < 1')
    return sum(data) / float(count)
|
def first_non_repeating_letter(the_string):
    """Return the first character occurring exactly once in the string.

    Comparison is case-insensitive ('t' matches 'T'), but the returned
    character keeps the casing it has in the original string.
    'sTress' -> 'T'
    :param the_string: str, letters of alphabet
    :return: str, single letter or ''
    """
    folded = the_string.lower()
    # Scan left to right; the first case-insensitively unique character
    # wins, and we report it with its original casing.
    for position, char in enumerate(folded):
        if folded.count(char) == 1:
            return the_string[position]
    return ''
|
def hebrew_date(year, month, day):
    """Build a Hebrew date data structure as a [year, month, day] list."""
    return list((year, month, day))
|
def check_undirected(graph):
    """ dict -> boolean
    Sanity check that every edge appears in both directions, i.e. that
    the adjacency mapping really describes an undirected graph.
    """
    return all(
        node in graph[neighbor]
        for node in graph
        for neighbor in graph[node]
    )
|
def dcrossing(m_):
    """
    Return the largest k for which the given matching or set
    partition has a k-distant crossing.
    INPUT:
    m -- a matching or set partition, as a list of 2-element tuples
    representing the edges. You'll need to call setp_to_edges() on
    the objects returned by SetPartitions() to put them into the
    proper format.
    OUTPUT:
    The largest k for which the object has a k-distant crossing.
    Matchings and set partitions with no crossings at all yield -1.
    EXAMPLES:
    The main example from the paper::
    sage: from sage.tests.arxiv_0812_2725 import *
    sage: dcrossing(setp_to_edges(Set(map(Set, [[1,5],[2,4,9],[3],[6,12],[7,10,11],[8]]))))
    3
    A matching example::
    sage: from sage.tests.arxiv_0812_2725 import *
    sage: dcrossing([(4, 7), (3, 6), (2, 5), (1, 8)])
    2
    TESTS:
    The empty matching and set partition are noncrossing::
    sage: dcrossing([])
    -1
    sage: dcrossing(Set([]))
    -1
    One edge::
    sage: dcrossing([Set((1,2))])
    -1
    sage: dcrossing(Set([Set((1,2))]))
    -1
    Set partition with block of size >= 3 is always at least
    0-dcrossing::
    sage: dcrossing(setp_to_edges(Set([Set((1,2,3))])))
    0
    """
    # Best crossing distance found so far; -1 means "no crossing".
    d = -1
    # Work on a mutable copy so each unordered edge pair is visited once.
    m = list(m_)
    while len(m):
        e1_ = m.pop()
        for e2_ in m:
            # Sort each edge's endpoints so e[0] <= e[1].
            e1, e2 = sorted(e1_), sorted(e2_)
            # e1 starts first and crosses e2 (shared endpoints allowed):
            # the crossing distance is e1[1] - e2[0].
            if (e1[0] < e2[0] and e2[0] <= e1[1] and e1[1] < e2[1] and
                e1[1] - e2[0] > d):
                d = e1[1] - e2[0]
            # Symmetric case: e2 starts first and crosses e1.
            if (e2[0] < e1[0] and e1[0] <= e2[1] and e2[1] < e1[1] and
                e2[1] - e1[0] > d):
                d = e2[1] - e1[0]
    return d
|
def is_anagram(str1, str2):
    """
    :rtype: bool
    :param str1: First string
    :param str2: Second string
    :return: True if the two strings are anagrams

    Fixes the original bug where equal-length non-anagrams (e.g. 'ab'
    vs 'cd') were reported as anagrams.
    """
    # Sorted character sequences match exactly iff the strings are
    # anagrams; this also implies equal lengths.
    return sorted(str1) == sorted(str2)
|
def _density_factor(density_units_in: str, density_units_out: str) -> float:
"""helper method for convert_density"""
factor = 1.0
if density_units_in == 'slug/ft^3':
pass
elif density_units_in == 'slinch/in^3':
factor *= 12**4
elif density_units_in == 'kg/m^3':
factor /= 515.378818
else:
msg = 'density_units_in=%r is not valid; use [slug/ft^3]' % density_units_in
raise RuntimeError(msg)
# data is now in slug/ft^3
if density_units_out == 'slug/ft^3':
pass
elif density_units_out == 'slinch/in^3':
factor /= 12**4
elif density_units_out == 'kg/m^3':
factor *= 515.378818
else:
msg = 'density_units_out=%r is not valid; use [slug/ft^3, slinch/in^3]' % density_units_out
raise RuntimeError(msg)
return factor
|
def text_reply(text):
    """Wrap plain text in the general message-list reply format.

    :param text: text reply
    :return: single-element general message list
    """
    message = {'type': 'text', 'data': {'text': text}}
    return [message]
|
def first_is_higher(v1, v2):
    """
    Return True if the first software version number is higher than the
    second, comparing dot-separated numeric components left to right.
    Args:
        v1 - The first version string to compare
        v2 - The second version string to compare
    Component pairs where either side is non-numeric are skipped. When
    all compared components are equal, the version with more components
    is considered higher (e.g. '1.2.3' > '1.2').
    """
    v1_parts = v1.split('.')
    v2_parts = v2.split('.')
    # Bug fix: on an equal common prefix the LONGER version wins, so the
    # tie-break compares with '>' (the original used '<', which ranked
    # '1.2' above '1.2.3').
    higher_on_tie = len(v1_parts) > len(v2_parts)
    # zip stops at the shorter list, matching the original's min-length loop.
    for part1, part2 in zip(v1_parts, v2_parts):
        if part1.isdigit() and part2.isdigit():
            num1, num2 = int(part1), int(part2)
            if num1 > num2:
                return True
            if num2 > num1:
                return False
    return higher_on_tie
|
def _Indentation(indentation_level):
"""Returns the indentation string."""
return " " * indentation_level
|
def get_data_from_given_model(spam_mail_model, ham_mail_model):
    """
    Label the spam/ham document representations and combine them.
    Each document dict gets a "this_is_the_class_of_the_document" entry:
    1 for spam, 0 for ham. Both input lists are mutated in place.
    :param spam_mail_model: This is the representation(list) of each spam document in the given format
    :param ham_mail_model: This is the representation(list) of each ham document in the given format
    :return: the concatenated labeled list (spam first, then ham)
    """
    for each_dict in spam_mail_model:
        each_dict["this_is_the_class_of_the_document"] = 1
    for each_dict in ham_mail_model:
        each_dict["this_is_the_class_of_the_document"] = 0
    all_data = spam_mail_model + ham_mail_model
    # NOTE(review): the original comment claimed the data is shuffled here,
    # but no shuffling is performed -- callers must shuffle before any
    # train/test split. TODO confirm intended behavior.
    return all_data
|
def replace_cr_with_newline(message: str) -> str:
    """
    TQDM and requests use carriage returns to get the training line to update
    for each batch without adding more lines to the terminal output.
    Displaying those in a file won't work correctly, so strip every carriage
    return and guarantee the message ends with exactly one trailing newline.
    """
    cleaned = message.replace("\r", "")
    if not cleaned.endswith("\n"):
        cleaned += "\n"
    return cleaned
|
def bubble_sort(array):
    """Sort a list in place with bubble sort and return it.

    Repeatedly steps through the list, comparing adjacent items and
    swapping them when out of order. Time complexity O(n^2).

    Parameters
    ----------
    array : iterable
        A list of unsorted numbers (mutated in place).

    Returns
    -------
    array : iterable
        The same list, sorted ascending.
    """
    size = len(array)
    for sweep in range(size - 1):
        # After each sweep the largest remaining element has bubbled to
        # the end, so the comparison window shrinks by one.
        for pos in range(size - 1 - sweep):
            if array[pos] > array[pos + 1]:
                array[pos], array[pos + 1] = array[pos + 1], array[pos]
    return array
|
def calculate_test_code_to_production_code_ratio(production_code_metrics, test_code_metrics):
    """Calculate the ratio of test code lines to production code lines."""
    production_lines = production_code_metrics["SUM"]["code"]
    test_lines = test_code_metrics["SUM"]["code"]
    return float(test_lines) / float(production_lines)
|
def is_boundary_edge(a, b, bdy_edges):
    """
    Return True when the directed edge (a, b) appears in the list of
    boundary edges.
    """
    return any(a == start and b == end for start, end in bdy_edges)
|
def align(bytes, alignment=16):
    """Round BYTES up to the next multiple of ALIGNMENT."""
    # NOTE: parameter name shadows the builtin `bytes`; kept for
    # backward compatibility with keyword callers.
    remainder = bytes % alignment
    return bytes if remainder == 0 else bytes + alignment - remainder
|
def get_name(row):
    """
    Join the first two columns of a raw CSV row into a single name string.
    :param row: Raw row from CSV
    :return: Name string
    """
    return " ".join((row[0], row[1]))
|
def is_mirror(master_uri, compare_uri):
    """Check whether compare_uri is identical to or a country mirror of
    master_uri, e.g.:
        master_uri  = archive.ubuntu.com
        compare_uri = de.archive.ubuntu.com
        -> True
    """
    # Normalize: drop trailing slashes and spaces.
    compare_uri = compare_uri.rstrip("/ ")
    master_uri = master_uri.rstrip("/ ")
    # Identical URIs trivially match.
    if compare_uri == master_uri:
        return True
    # Extract the host part after the scheme separator; if either URI has
    # no "//" something is wrong and they cannot be mirrors.
    try:
        compare_host = compare_uri.split("//")[1]
        master_host = master_uri.split("//")[1]
    except IndexError:
        return False
    # Strip a leading "<country>." label and compare against the master.
    _prefix, dot, remainder = compare_host.partition(".")
    return bool(dot) and remainder == master_host
|
def display_benign_graph(value):
    """
    Decide whether to show or hide the top-20 benign-domains graph based
    on the toggle switch position.
    Args:
        value: value of the toggle switch.
    Returns:
        A Dash style dict hiding the graph only when the toggle is
        literally True (identity check, not truthiness).
    """
    return {'display': 'none' if value is True else 'unset'}
|
def pollardlambda(g, p, t, q, b, w, theta=8) :
    """
    Find x = dlog_g(t) in Z/pZ*, given that it is in [b, b+w]
    (Pollard's lambda / kangaroo method).

    :param g: group generator
    :param p: modulus of the group Z/pZ*
    :param t: target element whose discrete log is sought
    :param q: modulus for the returned exponent
    :param b: lower bound of the search interval
    :param w: width of the search interval
    :param theta: tuning parameter; success probability is roughly
        1 - e**(-theta) (birthday-like bound, Oorschot & Wiener)
    :raises RuntimeError: when the wild kangaroo overshoots or no
        collision is found
    """
    from itertools import count
    from math import sqrt
    # Size of domain of f (heuristic).
    #L = int(sqrt(w))
    L = 5
    # Psuedorandom function to walk on exponents.
    def f(TW) :
        # return 1
        return pow(2, TW % L)
    # Some suggested parameters discussed in Oorschot and Wiener.
    # Appears to be a birthday-like bound for success, around
    # 1 - e**(-theta).
    a = 1/(2*sqrt(theta))
    m = a * sqrt(w)
    N = int(m*theta) # Tame sequence iterations.
    MAX_WILD_ITERS = 10**5
    # Tame & wild sequence initial values: the tame kangaroo starts at the
    # top of the interval, the wild one at the target element.
    T = pow(g, b+w, p)
    W = t
    if W == T :
        return (b+w) % q
    # Compute Nth tame sequence element (lay the trap).
    # dT accumulates the total exponent distance the tame kangaroo hops.
    dT = 0
    for i in range(N) :
        dT += f(T)
        T = (T * pow(g, f(T), p)) % p
    # T = g**(b+w+dT)
    # Find element in wild sequence equal to Nth tame element
    # and return the corresponding exponent; dW tracks the wild distance.
    dW = 0
    for j in count() :
        dW += f(W)
        W = (W * pow(g, f(W), p)) % p
        if W == T :
            # W = t*g**dW, so t = g**(b+w+dT-dW).
            return (b+w + dT - dW) % q
        if dW > w + dT :
            # Wild kangaroo hopped past the trap without landing on it.
            raise RuntimeError('Failure: ' + str((g,p,t,q,b,w)))
    raise RuntimeError('Discrete log not found: ' + str((g,p,t,q,b,w)))
|
def _hue_process_transition_time(transition_seconds):
""" Transition time is in 1/10th seconds
and cannot exceed MAX_TRANSITION_TIME. """
# Max transition time for Hue is 900 seconds/15 minutes
return min(9000, transition_seconds * 10)
|
def _pop(line, key, use_rest):
"""
Helper for the line parser.
If key is a prefix of line, will remove ir from the line and will
extract the value (space separation), and the rest of the line.
If use_rest is True, the value will be the rest of the line.
Return a tuple with the value and the rest of the line.
"""
value = None
if line.startswith(key):
line = line[len(key) :].strip()
if use_rest:
value = line
line = ""
else:
value, line = line.split(" ", 1)
return value, line.strip()
|
def misclassification_rate(preds, alt_preds, k=1):
    """
    Compute the misclassification rate for a group of base and alternative
    predictions. For details, check: Narodytska, Nina, and Shiva Prasad
    Kasiviswanathan. "Simple black-box adversarial perturbations for deep
    networks." arXiv preprint arXiv:1612.06299 (2016).
    :param preds: The list of base predictions
    :param alt_preds: The list of alternative predictions
    :param k: The number of misclassified predictions to trigger a misclassification
    :returns: The misclassification rate, rounded to 2 decimals
    """
    total = len(preds)
    missed = 0
    for idx in range(len(preds)):
        base = preds[idx]
        # Empty base predictions are excluded from the denominator.
        if not base:
            total -= 1
            continue
        # Count a miss when the top base prediction is absent from the
        # alternative's top-k.
        if base[0] not in alt_preds[idx][:k]:
            missed += 1
    return round(missed / total, 2)
|
def l2_bit_rate(packet_size, preamble, pps):
    """
    Return the l2 bit rate.
    :param packet_size: packet size on the line in bytes
    :param preamble: preamble size of the packet header in bytes
    :param pps: packets per second
    :return: l2 bit rate (bits per second) as float

    The original multiplied the two byte counts together; the on-wire
    size per packet is their sum, converted to bits (x8).
    """
    bytes_per_packet = packet_size + preamble
    return float(bytes_per_packet * 8 * pps)
|
def get_node_attr_by_key(nodes, key, attr, subkey=None):
    """
    Return the given attribute of the first node matching *key* (and,
    when provided, *subkey*), or None when no node matches.
    """
    if subkey:
        matches = [node for node in nodes
                   if node['key'] == key and node['subkey'] == subkey]
    else:
        matches = [node for node in nodes if node['key'] == key]
    return matches[0][attr] if matches else None
|
def _toint(string):
"""
Some bits sometimes are a character. I haven't found what do they mean,
but they break cinfony fingerprints unless taken care of. This functions is just for that.
"""
if string.isdigit():
return int(string)
else:
return 0
|
def flare_value(flare_class):
    """Convert a solar flare class string into the lower bound in W/m**2
    of the GOES 1-8 Angstrom X-Ray band.

    An 'X10' flare = 0.001 W/m**2. Works only on scalars.

    Parameters
    ----------
    flare_class : string
        class of solar flare (e.g. 'X10')

    Returns
    -------
    value : float
        GOES 1-8 Angstrom band X-Ray flux in W/m**2.

    References
    ----------
    [1] http://www.spaceweatherlive.com/en/help/the-classification-of-solar-flares
    """
    # Letter -> base-10 exponent of the class threshold.
    exponents = {'A': -8, 'B': -7, 'C': -6, 'M': -5, 'X': -4}
    power = exponents[flare_class[0].upper()]
    coef = float(flare_class[1:])
    return coef * 10.**power
|
def delete_from(string, deletes):
    """Remove every occurrence of each substring in *deletes* from *string*."""
    for fragment in deletes:
        string = string.replace(fragment, '')
    return string
|
def main(A):
    """Return True when array A contains a Pythagorean triplet, i.e.
    values a, b, c with a**2 + b**2 == c**2; otherwise False."""
    squares = sorted([x**2 for x in A])
    # For each candidate hypotenuse square x (largest first), look for a
    # smaller square y such that x - y is also among the squares that lie
    # between y and x in the sorted list.
    for x in reversed(squares):
        # NOTE(review): list.index returns the FIRST occurrence, so
        # duplicate values in A narrow the search windows below --
        # verify intended behavior for inputs with repeated elements.
        for y in squares[0:
                squares.index(x)]:
            if x - y in squares[squares.index(y):
                    squares.index(x)]:
                return True
    return False
|
def get_iou(bb1, bb2):
    """
    Compute the intersection-over-union of two axis-aligned bounding boxes.

    Parameters
    ----------
    bb1 : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}; (x1, y1) is the top-left corner,
        (x2, y2) the bottom-right corner.
    bb2 : dict
        Same keys and conventions as bb1.

    Returns
    -------
    float
        IoU value in [0, 1].
    """
    # Intersection rectangle corners.
    left = max(bb1['x1'], bb2['x1'])
    top = max(bb1['y1'], bb2['y1'])
    right = min(bb1['x2'], bb2['x2'])
    bottom = min(bb1['y2'], bb2['y2'])
    # Disjoint boxes: no overlap at all.
    if right < left or bottom < top:
        return 0.0
    # Intersection of two AABBs is itself an AABB.
    intersection_area = (right - left) * (bottom - top)
    area1 = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
    area2 = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
    # Union = sum of both areas minus the double-counted intersection.
    iou = intersection_area / float(area1 + area2 - intersection_area)
    assert 0.0 <= iou <= 1.0
    return iou
|
def all_keys_known(dictionary, known_keys, logger=None):
    """
    Check whether every key of the dictionary is listed in known_keys.
    :param dictionary: dict, dictionary whose keys are verified
    :param known_keys: list, list of known keys
    :param logger: logger instance (falls back to print when absent)
    :return: boolean, True if all keys are known, False otherwise
    """
    unknown = [key for key in dictionary if key not in known_keys]
    if not unknown:
        return True
    joined = ','.join("'{}'".format(key) for key in unknown)
    message = 'The following keys are unknown: {}.\n'.format(joined)
    if logger:
        logger.error(message)
    else:
        print('ERROR: ' + message)
    return False
|
def ricc_lrcf_solver_options(lrradi_tol=1e-10,
                             lrradi_maxiter=500,
                             lrradi_shifts='hamiltonian_shifts',
                             hamiltonian_shifts_init_maxiter=20,
                             hamiltonian_shifts_init_seed=None,
                             hamiltonian_shifts_subspace_columns=6):
    """Returns available Riccati equation solvers with default solver options.

    Parameters
    ----------
    lrradi_tol
        See :func:`solve_ricc_lrcf`.
    lrradi_maxiter
        See :func:`solve_ricc_lrcf`.
    lrradi_shifts
        See :func:`solve_ricc_lrcf`.
    hamiltonian_shifts_init_maxiter
        See :func:`hamiltonian_shifts_init`.
    hamiltonian_shifts_init_seed
        See :func:`hamiltonian_shifts_init`.
    hamiltonian_shifts_subspace_columns
        See :func:`hamiltonian_shifts`.

    Returns
    -------
    A dict of available solvers with default solver options.
    """
    shift_options = {
        'type': 'hamiltonian_shifts',
        'init_maxiter': hamiltonian_shifts_init_maxiter,
        'init_seed': hamiltonian_shifts_init_seed,
        'subspace_columns': hamiltonian_shifts_subspace_columns,
    }
    lrradi_options = {
        'type': 'lrradi',
        'tol': lrradi_tol,
        'maxiter': lrradi_maxiter,
        'shifts': lrradi_shifts,
        'shift_options': {'hamiltonian_shifts': shift_options},
    }
    return {'lrradi': lrradi_options}
|
def check_height(hgt):
    """hgt (Height) - a number followed by either cm or in:
    If cm, the number must be at least 150 and at most 193.
    If in, the number must be at least 59 and at most 76.

    Returns False (instead of raising ValueError) for malformed input such
    as a missing unit or a non-numeric value.
    """
    num, unit = hgt[:-2], hgt[-2:]
    try:
        value = float(num)
    except ValueError:
        # e.g. 'abccm' or '190' (unit missing) -- invalid, not an error
        return False
    if unit == 'cm':
        return 150 <= value <= 193
    if unit == 'in':
        return 59 <= value <= 76
    return False
|
def _get(weather_data, item):
    """Look up *item* in the weather data, returning '' when absent."""
    default = ""
    return weather_data.get(item, default)
|
def custom_sort(stack):
    """Insertion sort implemented with two stacks. This should never be used.

    Side effect: the input list is consumed (left empty) by the sort.
    """
    ordered = []
    while stack:
        current = stack.pop()
        # Shovel larger elements back onto the input stack until the
        # insertion point for `current` is exposed.
        while ordered and ordered[-1] > current:
            stack.append(ordered.pop())
        ordered.append(current)
    return ordered
|
def doMath(op, op1, op2):
    """Apply the binary operator *op* to *op1* and *op2*.

    Raises ValueError on division by zero. Any unrecognised operator is
    treated as subtraction (the original catch-all behaviour).

    NOTE(review): '^' is evaluated as Python's bitwise XOR, not
    exponentiation -- confirm this matches the calculator's intent.
    """
    if op == "/":
        if op2 == 0:
            raise ValueError('Cannot divide by zero')
        return op1 / op2
    dispatch = {
        "^": lambda a, b: a ^ b,
        "*": lambda a, b: a * b,
        "+": lambda a, b: a + b,
    }
    handler = dispatch.get(op)
    if handler is not None:
        return handler(op1, op2)
    # Fall-through: everything else subtracts, matching the original else.
    return op1 - op2
|
def get_hd_domain(username, default_domain='default'):
    """Returns the domain associated with an email address.
    Intended for use with the OAuth hd parameter for Google.
    Args:
        username: Username to parse.
        default_domain: Domain to set if '@suchandsuch.huh' is not part of the
            username. Defaults to 'default' to specify a regular Google account.
    Returns:
        String of the domain associated with username.
    """
    domain = username.partition('@')[2]
    # Google consumer domains confuse the hd parameter (thanks, bartosh!),
    # so map them to the regular-account sentinel.
    if domain in ('gmail.com', 'googlemail.com'):
        return 'default'
    return domain if domain else default_domain
|
def vnc_bulk_get(vnc_api, obj_name, obj_uuids=None, parent_uuids=None,
                 fields=None):
    """Get VNC objects in bulk, querying in fixed-size chunks.

    :param vnc_api: API client exposing a ``<obj_name>_list`` method
    :param obj_name: object type with underscores (e.g. 'virtual_network')
    :param obj_uuids: object uuids to fetch (takes precedence over parents)
    :param parent_uuids: parent uuids to search under
    :param fields: optional list of fields to fetch
    :return: list of objects; [] when neither uuid list is provided
    """
    chunk_size = 20
    # The original condition `obj_uuids and parent_uuids or obj_uuids and
    # not parent_uuids` reduces to just `obj_uuids`.
    if obj_uuids:
        search_by_obj = True
        uuid_list = obj_uuids
    elif parent_uuids:
        search_by_obj = False
        uuid_list = parent_uuids
    else:
        return []
    obj_list = []
    # Hoist the method/key lookups out of the loop.
    list_method = getattr(vnc_api, obj_name + "_list")
    result_key = obj_name.replace('_', '-')
    for chunk_start in range(0, len(uuid_list), chunk_size):
        chunk_uuids = uuid_list[chunk_start:chunk_start + chunk_size]
        chunk_obj_list = list_method(
            obj_uuids=chunk_uuids if search_by_obj else None,
            parent_id=None if search_by_obj else chunk_uuids,
            fields=fields).get(result_key)
        obj_list += chunk_obj_list
    return obj_list
|
def translate(sequence):
    """
    Translates a given nucleotide sequence to a string of amino acids.

    :param sequence: DNA string whose length must be a multiple of 3;
        stop codons are rendered as '_'
    :return: the protein string on success; otherwise prints a warning and
        returns "INVALID SEQUENCE OF NUCLEOTIDES."
    """
    table = {
        'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
        'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
        'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
        'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
        'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
        'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
        'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
        'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
        'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
        'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
        'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
        'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
        'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
        'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
        'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
        'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W', }
    if len(sequence) % 3 == 0:
        protein = ""
        valid = True
        for i in range(0, len(sequence), 3):
            codon = sequence[i:i + 3]
            # Explicit membership test instead of a bare `except:` clause,
            # which also swallowed unrelated errors.
            if codon not in table:
                valid = False
                break
            protein += table[codon]
        if valid:
            return protein
    print("The sequence of nucleotides is not valid.")
    return "INVALID SEQUENCE OF NUCLEOTIDES."
|
def __rall_power(parent_diam, e=1.5):
    """
    Compute the diameter of a child segment of a branch according to
    Rall's Power Law as described in Van Ooyen et al 2010, assuming the
    two child branches have equal diameter.
    """
    split_factor = 2 ** (1 / e)
    return parent_diam / split_factor
|
def tokenize_chinese_chars(text):
    """Split *text* so each CJK character (and each whitespace character)
    becomes its own token; runs of other characters stay together.
    """
    # Codepoint ranges of the CJK Unified Ideographs blocks (plus the
    # extensions and the compatibility block). Hangul, Hiragana and
    # Katakana are deliberately outside these ranges: those alphabets are
    # used to write space-separated words and need no special handling.
    cjk_ranges = (
        (0x4E00, 0x9FFF),
        (0x3400, 0x4DBF),
        (0x20000, 0x2A6DF),
        (0x2A700, 0x2B73F),
        (0x2B740, 0x2B81F),
        (0x2B820, 0x2CEAF),
        (0xF900, 0xFAFF),
        (0x2F800, 0x2FA1F),
    )

    def _is_cjk(cp):
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _is_space(ch):
        # Matches the original whitespace set, which also includes
        # U+202F (narrow no-break space).
        return ch in (" ", "\t", "\r", "\n") or ord(ch) == 0x202F

    tokens = []
    pending = ""
    for ch in text:
        if _is_cjk(ord(ch)) or _is_space(ch):
            # Flush any buffered run before emitting the single-char token.
            if pending:
                tokens.append(pending)
                pending = ""
            tokens.append(ch)
        else:
            pending += ch
    if pending:
        tokens.append(pending)
    return tokens
|
def _determine_format(format_string: str):
    """
    Determines the image file format from a string (header or extension).
    Args:
        format_string: Header or file extension.
    Returns:
        str: Type of the image; JPEG variants normalize to "JPG", TIF
        variants to "TIFF", and unrecognised input defaults to "JPG".
    """
    known_formats = ("PNG", "TIF", "TIFF", "JPG", "JPEG")
    upper = format_string.upper()
    for candidate in known_formats:
        if candidate not in upper:
            continue
        # Normalize the aliases before returning.
        if "JPEG" in upper:
            return "JPG"
        if "TIF" in upper:
            return "TIFF"
        return candidate
    return "JPG"
|
def urljoin(base, *args):
    """Join path segments onto *base* with single slashes.

    :param base: base URL
    :param args: path segments; surrounding slashes are stripped
    :return: joined URL, ending in '/' unless it contains a query ('?')
    """
    url = base if base.endswith('/') else base + '/'
    for segment in args:
        url += segment.strip('/') + '/'
    # A query string means the trailing slash must go.
    return url[:-1] if '?' in url else url
|
def as_strs(x):
    """Convert a list of items to their string representations"""
    return list(map(str, x))
|
def msb(n):
    """Given an integer >= 0, return the most significant bit position.

    This is the number of bits needed to represent *n* (0 for 0).
    """
    assert n < 2**32
    # int.bit_length() computes exactly what the original shift loop did.
    return n.bit_length()
|
def check_sample(n_samples: int, sample: int) -> bool:
    """
    Check sample is between 0 <= from_sample < n_samples
    Parameters
    ----------
    n_samples : int
        Number of samples
    sample : int
        Sample to check
    Returns
    -------
    bool
        Return True if no errors
    Raises
    ------
    ValueError
        If sample < 0
    ValueError
        If sample > n_samples
    Examples
    --------
    >>> from resistics.sampling import check_sample
    >>> check_sample(100, 45)
    True
    >>> check_sample(100, 100)
    Traceback (most recent call last):
    ...
    ValueError: Sample 100 must be < 100
    >>> check_sample(100, -1)
    Traceback (most recent call last):
    ...
    ValueError: Sample -1 must be >= 0
    """
    # Guard clauses written as negated range checks.
    if not sample >= 0:
        raise ValueError(f"Sample {sample} must be >= 0")
    if not sample < n_samples:
        raise ValueError(f"Sample {sample} must be < {n_samples}")
    return True
|
def appname_task_id(appname):
    """Returns the task id from an app instance name of the form 'name#taskid'."""
    # Two-target unpacking (not partition) so a malformed name with zero or
    # multiple '#' characters still raises ValueError, as before.
    name_part, task_part = appname.split('#')
    return task_part
|
def _bowtie_major_version(stdout):
"""
bowtie --version returns strings like this:
bowtie version 0.12.7
32-bit
Built on Franklin.local
Tue Sep 7 14:25:02 PDT 2010
"""
version_line = stdout.split("\n")[0]
version_string = version_line.strip().split()[2]
major_version = int(version_string.split(".")[0])
# bowtie version 1 has a leading character of 0 or 1
if major_version == 0 or major_version == 1:
major_version = 1
return major_version
|
def get_json_list(list_of_strings, include_brackets=True):
    """
    Convert a list of strings into a single string representation, suitable for
    inclusion in GraphQL queries.
    Parameters
    ---------
    list_of_strings : list of str
        Strings to convert to single string
    include_backets : bool
        Whether to include square braces in the returned string
    Returns
    -------
    stringified_list : str
        String representation of the input list
    Example
    -------
    >>> get_json_list(['cat', 'dog'])
    '["cat", "dog"]'
    >>> get_json_list(['cat', 'dog'], include_brackets=False)
    '"cat", "dog"'
    """
    quoted = ['"{}"'.format(s) for s in list_of_strings]
    body = ', '.join(quoted)
    return '[' + body + ']' if include_brackets else body
|
def _map_key_binding_from_shortcut(shortcut):
    """Return a keybinding sequence given a menu command shortcut.

    Empty or missing shortcuts yield None.
    """
    if not shortcut:
        return None
    modifier_names = {"Cmd": "Command", "Ctrl": "Control"}
    parts = []
    for token in shortcut.split("+"):
        mapped = modifier_names.get(token)
        if mapped is None:
            # Single A-Z letters are lowercased; everything else
            # (e.g. 'Shift', 'F5') passes through unchanged.
            if len(token) == 1 and "A" <= token <= "Z":
                token = token.lower()
            parts.append(token)
        else:
            parts.append(mapped)
    return "<" + "-".join(parts) + ">"
|
def parseKernelLog(raw):
    """Parse a raw message from kernel log format
    /dev/kmsg record format:
    facility,sequence,timestamp,[optional,..];message\n
    Args:
        raw : the raw log message as a string
    Returns:
        {level, sequence, timestamp, message} message
        None on format error
    """
    # split line in header and body
    separator_index = raw.find(';')
    if separator_index < 0:
        return None
    header = raw[:separator_index]
    message = raw[separator_index + 1:]
    try:
        # Limit the split to 4 fields: extra optional header fields after the
        # third comma previously made the unpacking raise an uncaught
        # ValueError instead of returning None.
        raw_level, raw_sequence, raw_timestamp, _other = header.split(',', 3)
        return dict(
            level=int(raw_level),
            sequence=int(raw_sequence),
            timestamp=float(raw_timestamp) / 1000000,  # microseconds -> seconds
            message=message,
        )
    except ValueError:
        # Narrow except: wrong field count or non-numeric fields only.
        return None
|
def output_test(filename: str, pattern: str) -> bool:  # pylint: disable=unused-argument
    """Test the output.
    Always passes if ``pattern == "pass"``. Otherwise, fails.
    """
    passing_pattern = "pass"
    return pattern == passing_pattern
|
def compute_internet_checksum(data):
    """
    Compute the 16-bit Internet checksum over *data*. Works for both IP
    and UDP.

    *data* is consumed one character at a time via ord(); an odd-length
    input is implicitly padded with a zero byte.
    """
    total = 0
    has_odd_tail = len(data) % 2 == 1
    # Sum big-endian 16-bit words over the even-length prefix.
    for i in range(0, len(data) - 1, 2):
        total += (ord(data[i]) << 8) + ord(data[i + 1])
    if has_odd_tail:
        # Final lone byte becomes the high byte of a zero-padded word.
        total += ord(data[-1]) << 8
    # Fold carries back into the low 16 bits.
    while total >> 16:
        total = (total & 0xFFFF) + (total >> 16)
    return ~total & 0xFFFF
|
def is_blank(value):
    """Check for blankness.
    Args:
        value: The value to check
    Returns:
        True if value is None, not a string, or an empty/whitespace-only
        string; False for any non-blank string.
    """
    # Anything that is not a string (including None) counts as blank.
    if not isinstance(value, str):
        return True
    # Whitespace-only strings are blank too.
    return not value.strip()
|
def get_cheat_sheet(cheat_sheet):
    """Render a cheat-sheet dictionary into a display string.
    Parameters
    ----------
    :param dictionary cheat_sheet: maps data-type names to {method: description}
    :return: a str representation, one section per data type with a blank
        line after each section.
    """
    lines = []
    for section, methods in cheat_sheet.items():
        lines.append(f'__**{section}**__')
        lines.extend(f'**{name}** - {description}'
                     for name, description in methods.items())
        # Blank line terminates each section.
        lines.append('')
    return '\n'.join(lines)
|
def parse_no_db(cmd, args, user):
    """
    Parses commands relating to locating and opening a database.

    Returns a status message, or None for unrecognised commands (the
    original's implicit fall-through).
    """
    if cmd.lower() not in ("open", "connect"):
        return None
    if not args:
        return "No database specified"
    target = ' '.join(args)
    try:
        user.connect(args)
    except Exception as e:  # broad on purpose: report any connection failure
        return f"could not connect to {target}\nReason: {e}"
    return f"successfully connected to {target}"
|
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
                     step_norm, bound_hit):
    """Update the radius of a trust region based on the cost reduction.
    Returns
    -------
    Delta : float
        New radius.
    ratio : float
        Ratio between actual and predicted reductions. Zero if predicted
        reduction is zero.
    """
    ratio = actual_reduction / predicted_reduction if predicted_reduction > 0 else 0
    if ratio < 0.25:
        # Poor model agreement: shrink relative to the step actually taken.
        Delta = 0.25 * step_norm
    elif ratio > 0.75 and bound_hit:
        # Good agreement and the step hit the region boundary: expand.
        Delta *= 2.0
    return Delta, ratio
|
def calculate_internal_jumps(alignments):
    """ Count number of times the set of source word indices aligned to a target word index are not adjacent
    Each non adjacent set of source word indices counts only once
    >>> calculate_internal_jumps([{1,2,4}, {42}])
    1
    >>> calculate_internal_jumps([{1,2,3,4}])
    0
    >>> calculate_internal_jumps([set()])
    0
    """
    def _gapless(indices):
        # A set of ints is contiguous iff its span equals its size
        # (trivially true for empty / single-element sets).
        return len(indices) <= 1 or max(indices) - min(indices) + 1 == len(indices)
    return sum(1 for s in alignments if not _gapless(s))
|
def integer_to_ascii_symbol(value):
    """Convert value to ascii symbol (None for unmapped values, as before)."""
    symbols = {0: ' ', 1: '+', 2: '#'}
    return symbols.get(value)
|
def name_parser(name):
    """
    Format a dataset filename of the form {INDEX}.png into a display label.
    """
    index = name.split(".", 1)[0]
    return "Dataset index: {}".format(index)
|
def mysorted(*args, **kwargs):
    """sorted() wrapper that tolerates (and discards) a chunksize kwarg."""
    kwargs.pop("chunksize", None)
    return sorted(*args, **kwargs)
|
def serialize_recent_url(analysis, type_str):
    """Convert output of images to a JSON-serializable dict.

    :param analysis: list of dicts describing images
    :param type_str: 'recentimages-thumbs' or 'recentimages-tiles'; any
        other value yields an empty dict (original behavior)
    :return: dict with 'id', 'type' and 'attributes' keys, or {}
    """
    # Map each supported type to its (output_key, input_key) field pairs;
    # this replaces two near-identical branches that only differed in keys.
    field_maps = {
        'recentimages-thumbs': (('source', 'source'),
                                ('thumbnail_url', 'thumb_url')),
        'recentimages-tiles': (('source_id', 'source'),
                               ('tile_url', 'tile_url')),
    }
    if type_str not in field_maps:
        return {}
    pairs = field_maps[type_str]
    return {
        'id': None,
        'type': type_str,
        'attributes': [
            {out_key: item.get(in_key, None) for out_key, in_key in pairs}
            for item in analysis
        ],
    }
|
def fill_template(map_filepath, resolution, origin): # NOTE: Copied from generate_map_yaml.py
    """Return a string that contains the contents for the yaml file, filling out the blanks where
    appropriate.
    Args:
        map_filepath: Absolute path to map file (e.g. PNG).
        resolution: Resolution of each pixel in the map in meters.
        origin: Sequence of (x, y, yaw) giving the map origin pose.
    """
    # NOTE(review): the triple-quoted literal keeps the source indentation,
    # so all lines after the first carry leading spaces in the emitted
    # YAML -- confirm the consumer tolerates this.
    template = """image: MAP_FILEPATH
    resolution: RESOLUTION
    origin: [ORIGIN_X, ORIGIN_Y, YAW]
    negate: 0
    occupied_thresh: 0.65
    free_thresh: 0.196
    """
    # Substitute each placeholder in turn.
    template = template.replace('MAP_FILEPATH', map_filepath)
    template = template.replace('RESOLUTION', str(resolution))
    template = template.replace('ORIGIN_X', str(origin[0]))
    template = template.replace('ORIGIN_Y', str(origin[1]))
    template = template.replace('YAW', str(origin[2]))
    return template
|
def count_consecutives(tosses):
    """
    Return the length of the longest run of identical consecutive tosses.

    Empty or single-element sequences yield 1, matching the original.
    """
    longest = 1
    run_length = 1
    # Walk adjacent pairs instead of indexing.
    for previous, current in zip(tosses, tosses[1:]):
        if previous == current:
            run_length += 1
            longest = max(longest, run_length)
        else:
            run_length = 1
    return longest
|
def is_parent(parent_path, path):
    """
    Is a path the parent path of another path?
    /a/b is the parent of /a/b/c, but
    /a/b is not the parent of /a/b/c/d or /a/e or /f.

    Fixes the original check, which sliced the *parent* with the child's
    length (``pp[len(p):]`` is always empty), so deeper descendants like
    /a/b/c/d and sibling-prefix paths like /a/bc tested as children.
    """
    pp = parent_path.strip("/")
    p = path.strip("/")
    if not p.startswith(pp):
        return False
    # Remainder of the child path after the parent prefix.
    rest = p[len(pp):]
    if pp:
        # The prefix must end at a path-component boundary, so /a/b is
        # not mistaken for the parent of /a/bc.
        if not rest.startswith("/"):
            return False
        rest = rest[1:]
    # Exactly one extra component: non-empty, with no further separator.
    return bool(rest) and "/" not in rest
|
def imports_on_separate_lines(logical_line):
    """
    Imports should usually be on separate lines.

    Returns (offset, message) pointing at the first comma when a single
    'import' statement names several modules; None otherwise.
    """
    if not logical_line.startswith('import '):
        return None
    comma_offset = logical_line.find(',')
    if comma_offset > -1:
        return comma_offset, "E401 multiple imports on one line"
|
def calculate_input_voltage(excitation, Rdc, nominal_impedance):
    """Simplify electrical input definition to input voltage.

    :param excitation: (value, type) pair; type is one of "V" (volts),
        "W" (watts into Rdc) or "Wn" (watts into the nominal impedance)
    :param Rdc: DC resistance used for the "W" conversion
    :param nominal_impedance: impedance used for the "Wn" conversion
    :return: the input voltage, or None for an unknown excitation type
    """
    # NOTE: `type` shadows the builtin; left unchanged here.
    val, type = excitation
    if type == "Wn":
        # V = sqrt(P * Z_nominal)
        input_voltage = (val * nominal_impedance) ** 0.5
    elif type == "W":
        # V = sqrt(P * Rdc)
        input_voltage = (val * Rdc) ** 0.5
    elif type == "V":
        input_voltage = val
    else:
        # NOTE(review): the doubled quotes collapse (adjacent literals
        # concatenate), so the message prints V/W/Wn without quotes, and the
        # backslash continuation embeds the next line's leading spaces in
        # the string -- confirm the intended output.
        print("Input options are [float, ""V""], \
            [float, ""W""], [float, ""Wn"", float]")
        return(None)
    return(input_voltage)
|
def headerns(target):
    """
    Returns the header_namespace of a target, suffixed with a /, or empty string if no namespace.
    target: JSON map for a target.
    """
    namespace = target.get('headerNamespace', "")
    return namespace + "/" if namespace else ""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.