content stringlengths 42 6.51k |
|---|
def is_user(user):
    """Validate that a user dict exists.

    :param user: A dict object representing user
    :return (bool, str): Bool for whether or not user is valid, String for justification.
    """
    if user:
        return (True, None)
    return (False, 'No user found')
def plural(n, singular, plural, with_number=True):
    """Return the singular or plural form of a word, according to the number.

    If ``with_number`` is true (default), the return value is the number
    followed by the word; otherwise the word alone is returned.

    Usage:
    >>> plural(2, "ox", "oxen")
    '2 oxen'
    >>> plural(2, "ox", "oxen", False)
    'oxen'
    """
    form = singular if n == 1 else plural
    return "%s %s" % (n, form) if with_number else form
def nub(l, reverse=False):
    """Remove duplicates from a list, preserving order.

    If ``reverse`` is true, keep the last occurrence of each duplicate
    item instead of the first.

    :param l: input list (items must be hashable)
    :param reverse: keep last duplicates when True
    :return: a new list with duplicates removed

    Bug fixed: the ``reverse`` branch previously returned a ``reversed``
    iterator object instead of a list, giving an inconsistent return type
    between the two branches.
    """
    seen = set()
    result = []
    items = reversed(l) if reverse else l
    for item in items:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    if reverse:
        # Restore original ordering after scanning from the back.
        result.reverse()
    return result
def check_custom_dataloader(data: dict) -> bool:
    """Check if custom dataloader is used in calibration or evaluation."""
    sections = ("quantization", "evaluation")
    return any(
        data.get(section, {}).get("dataloader", {}).get("name") == "custom"
        for section in sections
    )
def is_any_list_overlap(list_a, list_b):
    """
    Is there any overlap between two lists
    :param list_a: Any list
    :param list_b: Any other list
    :return: True if lists have shared elements

    Bug fixed: ``any()`` over the set intersection tested the *truthiness*
    of the shared elements, so overlapping falsy values (0, '', None)
    wrongly returned False. Membership, not truthiness, now decides.
    """
    return not set(list_a).isdisjoint(list_b)
def sort_array(arr):
    """
    Sorts only the odd numbers leaving the even numbers in place
    :param: arr Array of numbers
    :return sorted numbers with only the odds sorted by value and evens retain their place
    :rtype: list
    """
    # Ascending iterator over the odd values; consumed left-to-right.
    odd_values = iter(sorted(x for x in arr if x % 2))
    return [x if x % 2 == 0 else next(odd_values) for x in arr]
def getclass(obj):
    """
    Unfortunately for old-style classes, type(x) returns types.InstanceType. But x.__class__
    gives us what we want.
    """
    try:
        return obj.__class__
    except AttributeError:
        return type(obj)
def is_vowel(ch):
    """ check if a char is a vowel """
    return ch.lower() in 'aeiou'
def kth_element_to_last(linked_list, k):
    """Returns kth to last element of a linked list.

    Uses two pointers a fixed distance apart so the list is traversed once.

    Args:
        linked_list: An instance object of LinkedList.
        k: Integer >=1, k=1 is last element, k=2 second to last, and so on.
    Returns:
        If kth to last element exists, returns value of data field.
        Otherwise returns None.
    """
    if not linked_list or not linked_list.head or k < 1:
        return None
    lead = linked_list.head
    trail = linked_list.head
    # Advance lead k-1 nodes ahead; if the list is shorter than k, fail.
    for _ in range(k - 1):
        lead = lead.next_node
        if not lead:
            return None
    # Walk both pointers until lead hits the tail; trail is then kth-last.
    while lead.next_node:
        lead = lead.next_node
        trail = trail.next_node
    return trail.data
def extract_id(url):
    """
    >>> extract_id('/bacon/eggs/')
    'eggs'
    """
    # Falsy urls (None, '') are passed through unchanged.
    if not url:
        return url
    return url.split('/')[-2]
def enketo_error500_mock(url, request):  # pylint: disable=unused-argument
    """
    Returns mocked Enketo Response object for all queries to enketo.ona.io that
    may result in an HTTP 500 error response.
    """
    response = {
        'status_code': 500,
        'content': "Something horrible happened.",
    }
    return response
def csv_int2bin(val):
    """format CAN id as bin
    100 -> 1100100
    """
    return format(val, "b")
def _convert_ratio_to_int(ratio: float):
"""
Round the ratio to 2 decimal places, multiply by 100, and take the integer part.
"""
return int((round(ratio, 2) * 100)) |
def is_search_type(splunk_record_key):
    """Return True if the given string is a search type key.
    :param splunk_record key: The string to check
    :type splunk_record_key: str
    :rtype: bool
    """
    SEARCH_TYPE_KEY = 'searchtype'
    return splunk_record_key == SEARCH_TYPE_KEY
def set_axis_limits(ax, xlimits=None, ylimits=None):
    """Sets the x- and y-boundaries of the axis (if provided)
    :param ax: axis object
    :param xlimits: a 2-tuple (lower_bound, upper_bound)
    :param ylimits: a 2-tuple (lower_bound, upper_bound)
    :returns: ax
    """
    for limits, setter in ((xlimits, ax.set_xlim), (ylimits, ax.set_ylim)):
        if limits is not None:
            setter(*limits)
    return ax
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
val_types = set(_type_name(key) for key in x.values())
return "({} containing {} keys and {} values)".format(
type(x), key_types, val_types)
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return "({} containing values of types {})".format(
type(x), types)
return str(type(x)) |
def line(p1, p2, debug=False):
    """Creates a line from two points
    From http://stackoverflow.com/a/20679579
    Args:
        p1 ([float, float]): x and y coordinates
        p2 ([float, float]): x and y coordinates
    Returns:
        (float, float, float): x, y and _
    """
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    a = y1 - y2
    b = x2 - x1
    c = x1 * y2 - x2 * y1
    return a, b, -c
def sphere_function(vals):
    """
    A very easy test function
    Parameters:
    vals - a list specifying the point in N-dimensionsla space to be
    evaluated
    """
    total = 0
    for val in vals:
        total += val ** 2
    return total
def f(x):
    """A real-valued function to optimized."""
    total = sum(x)
    return total ** 2
def splice_in_seq(new_seq: str, old_seq: str, full_seq: str) -> str:
    """
    Replace old_seq with new_seq in full_seq. full_seq is expected to contain old_seq
    >>> splice_in_seq("CASS", "CASR", "ABCDEFGCASRZZZ")
    'ABCDEFGCASSZZZ'
    >>> splice_in_seq("$$", "CASS", "ABCDEFGCASSYLMZ")
    'ABCDEFG$$YLMZ'
    >>> splice_in_seq("CASSRKDES", "CASSRKDDD", "CASSRKDDD")
    'CASSRKDES'
    """
    # Validate that the target subsequence occurs exactly once.
    assert old_seq in full_seq, f"Could not find {old_seq} in {full_seq}"
    assert full_seq.count(old_seq) == 1, "Sequence to be replaced is nonunique"
    # Uniqueness is guaranteed above, so a single replace is equivalent
    # to the index-based splice.
    return full_seq.replace(old_seq, new_seq)
def first_shift_is_valid(cur_individual):
    """ checks if the first shift is not an extension """
    first_two = cur_individual[:2]
    return first_two != '00'
def powerhalo(r, rs=1., rc=0., alpha=1., beta=1.e-7):
    """return generic twopower law distribution

    inputs
    ----------
    r     : (float) radius values
    rs    : (float, default=1.) scale radius
    rc    : (float, default=0. i.e. no core) core radius
    alpha : (float, default=1.) inner halo slope
    beta  : (float, default=1.e-7) outer halo slope

    returns
    ----------
    densities evaluated at r

    notes
    ----------
    different combinations are known distributions.
    alpha=1,beta=2 is NFW
    alpha=1,beta=3 is Hernquist
    alpha=2.5,beta=0 is a typical single power law halo
    """
    scaled_r = r / rs
    denom = ((scaled_r + rc) ** alpha) * ((1 + scaled_r) ** beta)
    return 1. / denom
def get_destroyed_endpoint(vol, array):
    """Return Destroyed Endpoint or None"""
    try:
        info = array.get_volume(vol, protocol_endpoint=True, pending=True)
        # A non-empty time_remaining marks a destroyed (pending-delete) endpoint.
        return info["time_remaining"] != ""
    except Exception:
        # Any lookup failure is treated as "not destroyed".
        return False
def clean_up_hero_name(hero_name):
    """Normalize a hero name to a consistent canonical form.

    :param hero_name: raw hero name string (expected lowercase)
    :return: the canonical hero name, or the input unchanged (with a
        warning printed) when the name is unknown.

    Fixed: removed duplicate dict-literal keys ("ant man", "antman",
    "msmarvel") that silently overwrote earlier, identical entries.
    """
    hero_name_replace = {
        "spiderwoman": "Spider Woman",
        "spider woman": "Spider Woman",
        "spider-woman": "Spider Woman",
        "spiderman": "Spider Man",
        "spider man": "Spider Man",
        "spider-man": "Spider Man",
        "ant man": "Ant Man",
        "antman": "Ant Man",
        "ant-man": "Ant Man",
        "scarlet witch": "Scarlet Witch",
        "scarlet-witch": "Scarlet Witch",
        "scarlett witch": "Scarlet Witch",
        "scarlett-witch": "Scarlet Witch",
        "scarlettwitch": "Scarlet Witch",
        "scarletwitch": "Scarlet Witch",
        "iron man": "Iron Man",
        "iron-man": "Iron Man",
        "ironman": "Iron Man",
        "black widow": "Black Widow",
        "black-widow": "Black Widow",
        "blackwidow": "Black Widow",
        "she hulk": "She-Hulk",
        "she-hulk": "She-Hulk",
        "shehulk": "She-Hulk",
        "black panther": "Black Panther",
        "black-panther": "Black Panther",
        "blackpanther": "Black Panther",
        "captain marvel": "Captain Marvel",
        "captain-marvel": "Captain Marvel",
        "captainmarvel": "Captain Marvel",
        "captain america": "Captain America",
        "captain-america": "Captain America",
        "captainamerica": "Captain America",
        "ms. marvel": "Ms. Marvel",
        "ms.marvel": "Ms. Marvel",
        "msmarvel": "Ms. Marvel",
        "ms marvel": "Ms. Marvel",
        "dr. strange": "Doctor Strange",
        "dr.strange": "Doctor Strange",
        "drstrange": "Doctor Strange",
        "dr strange": "Doctor Strange",
        "doctor strange": "Doctor Strange",
        "doctorstrange": "Doctor Strange",
        "hulk": "Hulk",
        "wasp": "Wasp",
        "thor": "Thor",
        "quicksilver": "Quicksilver",
        "hawkeye": "Hawkeye",
        "hawk eye": "Hawkeye",
        "hawk-eye": "Hawkeye",
        "rocket": "Rocket Raccoon",
        "rocket raccoon": "Rocket Raccoon",
        "rocket racoon": "Rocket Raccoon",
        "groot": "Groot",
        "drax": "Drax",
        "gamora": "Gamora",
        "starlord": "Star Lord",
        "star lord": "Star Lord",
        "star-lord": "Star Lord",
        "venom": "Venom",
        "adam warlock": "Adam Warlock",
        "adamwarlock": "Adam Warlock",
        "spectrum": "Spectrum",
        "nebula": "Nebula",
        "war machine": "War Machine",
        "war-machine": "War Machine",
        "warmachine": "War Machine",
        "vision": "Vision",
        "valkyrie": "Valkyrie",
        "miles morales": "Miles Morales",
        "spidergwen": "Spider Gwen",
        "spider gwen": "Spider Gwen",
        "ghost spider": "Spider Gwen",
    }
    if hero_name in hero_name_replace:
        return hero_name_replace[hero_name]
    # Unknown name: warn and return the input unchanged.
    print("Can't find {} hero".format(hero_name))
    return hero_name
def _build_js_asset(js_uri):
"""Wrap a js asset so it can be included on an html page"""
return '<script src="{uri}"></script>'.format(uri=js_uri) |
def unique_list_str(L):
    """ Ensure each element of a list of strings is unique by appending a number to duplicates.
    Note that this fails to generate uniqueness if a trio "Name", "Name", "Name_1" exists.
    """
    occurrences = {}
    unique = []
    for name in L:
        if name in occurrences:
            unique.append(str(name) + "_" + str(occurrences[name]))
            occurrences[name] += 1
        else:
            unique.append(str(name))
            occurrences[name] = 1
    return unique
def _inches_to_meters(length):
"""Convert length from inches to meters"""
return length * 2.54 / 100.0 |
def calculate_cpu_metric(data, code, ram):
    """Binary check for the presence of a flightboard.

    Historically this computed a normalized performance metric
    (data/max_data + code/max_code + ram/max_ram) / 3, but that formula is
    no longer used. Any flightboard has a positive amount of memory in at
    least one category, so the sum of the three categories being positive
    is taken as "present".

    :param data: The dedicated data storage for the system
    :param code: The memory available to code or additional storage space
    :param ram: The memory in ram, important for more complicated processes onboard the satellite
    :return: 1 if any memory is present, otherwise 0
    """
    total_memory = data + code + ram
    return 1 if total_memory > 0 else 0
def ensure_append_true_if_timed_append(
    config,
    extra_config,
    app_type,
    app_path,
):
    """
    Sets the "APPEND" App Configuration variable automatically to "TRUE" if the "timed_append" option
    is enabled.
    :param config: Configuration dictionary (the one that gets exported to local.settings.json).
    :param extra_config: Dictionary configuring the additional options passed by user in generator_config.json.
    :param app_type: Application type.
    :param app_path: Path to the output folder for the Function App.
    :return: The local.settings.json dictionary currently being formed.
    """
    timed_append = extra_config.get("timed_append", "false")
    if timed_append.lower() == "true":
        config["Values"]["APPEND"] = "TRUE"
    return config
def text_convert(input) -> str:
    """
    input: raw scraped html
    """
    if input is None:
        return ""
    return input.get_text().strip()
def derive_sentiment_message_type(compound):
    """
    Derives the type of the message based status of the sentiment
    :param compound
    :return: type of the message
    """
    if compound == 0:
        return "info"
    return "success" if compound > 0 else "error"
def set_to_zero(working_set, index):
    """Given a set and an index, set all elements to 0 after the index.

    Mutates ``working_set`` in place and returns it; an index at or past
    the last element leaves the list unchanged.
    """
    for position in range(index + 1, len(working_set)):
        working_set[position] = 0
    return working_set
def flattener(data):
    """
    Flatten a nested dictionary. Namespace the keys with a period, assuming no periods in the keys.
    :param dictionary: the nested dictionary
    :return: the flattened dictionary with namespaces
    >>> flattener([]) is None
    True
    >>> flattener(3) is None
    True
    >>> flattener({})
    {}
    >>> flattener({'key': 3, 'foo': {'a': 5, 'bar': {'baz': 8}}})
    {'key': 3, 'foo.a': 5, 'foo.bar.baz': 8}
    """
    if not isinstance(data, dict):
        return None
    flat = {}

    def _walk(node, prefix):
        # Depth-first traversal; nested dicts extend the namespace prefix.
        for key, value in node.items():
            name = prefix + key
            if isinstance(value, dict):
                _walk(value, name + ".")
            else:
                flat[name] = value

    _walk(data, "")
    return flat
def cap(number, min_, max_):
    """Cap a value between a lower and/or upper bound (inclusive).

    The lower bound is checked first: a value below ``min_`` returns
    ``min_`` without consulting ``max_``.
    """
    too_low = min_ is not None and number < min_
    if too_low:
        return min_
    too_high = max_ is not None and number > max_
    return max_ if too_high else number
def get_errors(read, to_db=True):
    """ list of all the errors from the read variables
    :param read: the log file content
    :param to_db: if False than the type on each item in the list is text, otherwise the type is json """
    err = []
    # Each chunk after an "ERROR " marker is one candidate error entry.
    div = read.split("ERROR ")
    for item in div:
        if item.count("Error: ") > 0:
            temp = item.split("Error: ")
            # Rebuild the full entry up to the end of the "...Error: msg" line.
            full_error = "ERROR " + temp[0] + "Error: " + temp[1].split("\n")[0]
        elif item.count("Schema: ") > 0:
            temp = item.split("Schema: ")
            full_error = "ERROR " + temp[0] + "Schema: " + temp[1].split("\n")[0]
        else:
            # Chunk carries neither marker (e.g. text before the first
            # "ERROR ") -- skip it.
            continue
        if to_db:
            # Split the first line on spaces; fields are positional.
            # NOTE(review): assumes a fixed log layout where index 6 is the
            # URL and index 7 a bracketed request type -- error[6]/error[7]
            # are not bounds-checked; confirm against the actual log format.
            error = full_error.split("\n")[0].split(" ")
            variables = {
                "level": error[0],
                "date": error[1],
                "time": error[2],
                "error_body": "\n".join(full_error.split("\n")[1:-1]),
                "value": full_error.split("\n")[-1],
                "url": error[6],
                "request_type": error[7][1:-1]
            }
            err.append(variables)
        else:
            err.append(full_error)
    return err
def _nths(x,n):
""" Given a list of sequences, returns a list of all the Nth elements of
all the contained sequences
"""
return [l[n] for l in x] |
def nodes_to_int(head, reverse=True):
    """Converts linked list number structure to number string
    for testing purposes.
    :returns Number string representing node structure
    """
    if head is None:
        return None
    parts = []
    node = head
    while node is not None:
        parts.append(str(node.data))
        node = node.next
    num_str = "".join(parts)
    # Reversal is character-wise, matching the digit-per-node layout.
    return num_str[::-1] if reverse else num_str
def check_dictionary(src2trg: dict) -> dict:
    """
    Check validity of PanLex dictionary:
    - Each source token only has one target token
    - Source and target tokens are strings
    :param src2trg dict: PanLex dictionary
    :rtype dict: validated PanLex dictionary
    """
    out = {}
    for src, translations in src2trg.items():
        assert isinstance(src, str)
        assert len(translations) == 1
        target = translations[0]
        assert isinstance(target, str)
        out[src] = target
    return out
def update_dcid(dcid, prop, val):
    """Given a dcid and pv, update the dcid to include the pv.
    Args:
      dcid: current dcid
      prop: the property of the value to add to the dcid
      val: the value to add to the dcid
    Returns:
      updated dcid as a string
    """
    # Drop any "prefix:" namespace, then a leading WHO/ marker.
    val_dcid = val.split(":")[-1]
    if val_dcid.startswith("WHO/"):
        val_dcid = val_dcid[4:]
    if prop == "age":
        # Age ranges spell out their punctuation: 65+ -> 65PLUS, 10-20 ->
        # 10TO20; the final replace undoes the accidental TOPLUS combo.
        val_dcid = (val_dcid.replace("+", "PLUS")
                    .replace("-", "TO")
                    .replace("TOPLUS", "PLUS"))
    return dcid + "_" + val_dcid
def turn_psql_url_into_param(postgres_url: str) -> dict:
    """
    Parse a postgres/postgresql URL into a dict of connection parameters.

    >>> turn_psql_url_into_param(
    ...     'postgres://USERNAME:PASSWORD@URL:PORT/USER?sslmode=SSLMODE') == {
    ...     'db_user':'USERNAME', 'db_password': 'PASSWORD', 'db_host': 'URL', 'db_port':
    ...     'PORT', 'db_name': 'USER', 'sslmode': 'SSLMODE'}
    True
    >>> turn_psql_url_into_param(
    ...     'USERNAME:PASSWORD@URL:PORT/USER?sslmode=SSLMODE')
    Traceback (most recent call last):
        ...
    AttributeError: The database URL is not well formated
    >>> turn_psql_url_into_param(
    ...     'postgres://USERNAME:PASSWORD@URL:PORT/USER') == {
    ...     'db_user': 'USERNAME', 'db_password': 'PASSWORD', 'db_host': 'URL',
    ...     'db_port': 'PORT', 'db_name': 'USER'}
    True
    >>> turn_psql_url_into_param("postgresql://") == {}
    True
    >>> turn_psql_url_into_param('postgresql://localhost') == {'db_host':
    ...     'localhost'}
    True
    >>> turn_psql_url_into_param('postgresql://localhost:5433') == {'db_host':
    ...     'localhost', 'db_port': '5433'}
    True
    >>> turn_psql_url_into_param('postgresql://localhost/mydb') == {'db_host':
    ...     'localhost', 'db_name': 'mydb'}
    True
    >>> turn_psql_url_into_param('postgresql://user@localhost') == {'db_host':
    ...     'localhost', 'db_user': 'user'}
    True
    >>> turn_psql_url_into_param('postgresql://user:secret@localhost') == {
    ...     'db_host': 'localhost', 'db_user': 'user', 'db_password': 'secret'}
    True
    >>> turn_psql_url_into_param('postgresql://oto@localhost/ther?'
    ...     'connect_timeout=10&application_name=myapp') == {
    ...     'db_host': 'localhost', 'db_user': 'oto', 'db_name': 'ther',
    ...     'connect_timeout': '10', 'application_name': 'myapp'}
    True
    """
    # Only the two canonical postgres schemes are accepted.
    if not postgres_url.startswith(("postgres://", "postgresql://")):
        raise AttributeError("The database URL is not well formated")
    response = {}
    # Get parameters from the "?key=value&..." query-string suffix, if any.
    params_start = postgres_url.rfind("?")
    if not params_start == -1:
        params = postgres_url[params_start + 1 :]
        params = [param.split("=") for param in params.split("&")]
        for param in params:
            response[param[0]] = param[1]
        user_and_db_info = postgres_url[postgres_url.find("://") + 3 : params_start]
    else:
        user_and_db_info = postgres_url[postgres_url.find("://") + 3 :]
    # Bare scheme ("postgresql://") yields whatever params were parsed (if any).
    if not user_and_db_info:
        return response
    # User information: "user[:password]@" prefix before the host.
    if "@" in user_and_db_info:
        user_info, db_info = tuple(user_and_db_info.split("@"))
        user_info = user_info.split(":")
        response["db_user"] = user_info[0]
        if len(user_info) > 1:
            response["db_password"] = user_info[1]
    else:
        db_info = user_and_db_info
    # Database information: "host[:port][/dbname]".
    db_info = db_info.split("/")
    if len(db_info) > 1:
        response["db_name"] = db_info[1]
    url_and_port = db_info[0]
    url_and_port = url_and_port.split(":")
    response["db_host"] = url_and_port[0]
    if len(url_and_port) > 1:
        response["db_port"] = url_and_port[1]
    return response
def get_tests_to_run(test_list, test_params, cutoff, src_timings):
    """
    Returns only test that will not run longer that cutoff.
    Long running tests are returned first to favor running tests in parallel
    Timings from build directory override those from src directory
    """
    known_timings = src_timings.existing_timings

    def get_test_time(test):
        # Unknown tests report 0 so they always run.
        for entry in known_timings:
            if entry['name'] == test:
                return entry['time']
        return 0

    # Some tests must also be run with additional parameters; each test
    # always runs once bare, then once per parameter set.
    candidates = []
    for test_name in test_list:
        candidates.append(test_name)
        extra = test_params.get(test_name)
        if extra is not None:
            for parameter in extra:
                candidates.append(test_name + " " + " ".join(parameter))
    runnable = [test for test in candidates if get_test_time(test) <= cutoff]
    # Longest first; name breaks ties deterministically.
    runnable.sort(key=lambda test: (-get_test_time(test), test))
    return runnable
def vol_color(errupt_date):
    """Returns string representing color of the volcano marker based on last erruption date.
    Parameters
    ----------
    errupt_date : str
        Describes last erruption date according to data format.
        One of the following 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'U', 'Q', '?'
    Returns
    -------
    str
        string representing color
    """
    if errupt_date == 'D1':
        return 'red'
    if errupt_date in ('D2', 'D3', 'D4', 'D5'):
        return 'orange'
    # D6, D7, U, Q, '?' and anything else map to green.
    return 'green'
def convert_bool(string):
    """Check whether string is boolean.

    Returns (recognized, value): recognized is True only for the exact
    strings "True"/"False"; value is the parsed boolean (False otherwise).
    """
    mapping = {"True": True, "False": False}
    if string in mapping:
        return True, mapping[string]
    return False, False
def FilterStations(MasterStationList, data_files):
    """
    Filter the files to download by those stations required
    If station of remote file exists in master list, want to download,
    else not.
    """
    def _station(dfile):
        # Station code is the filename stem minus its 5-character suffix.
        return dfile.filen.split('.')[0][:-5]

    return [dfile for dfile in data_files if _station(dfile) in MasterStationList]
def calculate_time(start, end):
    """Pretty prints the time taken for an operation.
    Args:
        start (float): Start time of an operation.
        end (float): End time of an operation.
    Returns:
        str: Pretty format the time taken for an operation.
    """
    time_taken = int(round((end - start), 0))
    # Successive divmods peel off days, hours, minutes; remainder is seconds.
    day, remainder = divmod(time_taken, 86400)
    hour, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    if day != 0:
        return '{} days {} hours {} min {} sec'.format(day, hour, minutes,
                                                       seconds)
    if hour != 0:
        return '{} hours {} min {} sec'.format(hour, minutes, seconds)
    if minutes != 0:
        return '{} min {} sec'.format(minutes, seconds)
    return '{} sec'.format(seconds)
def decimal_to_binary(num=2):
    """
    Return the binary representation of ``num`` as a string.

    :param num: an int (may be negative)
    :return: binary string, e.g. 5 -> '101', -5 -> '-101', 0 -> '0'

    Bug fixed: the zero check compared the int against the *string* '0'
    (``num == '0'``), which was never true, so decimal_to_binary(0)
    returned '' instead of '0'.
    """
    is_neg = num < 0
    num = abs(num)
    if num == 0:
        return '0'
    result = ''
    while num > 0:
        result = str(num % 2) + result
        num //= 2
    return '-' + result if is_neg else result
def gemini_path(request):
    """Return the path of a gemini database"""
    return "tests/fixtures/HapMapFew.db"
def flatten_weights(list_of_mats):
    """
    Flatten the weights for storage

    Each matrix is flattened to a Python list; all lists are chained into
    one flat list in input order.
    """
    return [value
            for arr in list_of_mats
            for value in arr.flatten().tolist()]
def _phone2char(phones, char_max_len):
    """Collapse consecutive duplicate phones into a char sequence.

    Scans ``phones``, appending each phone that differs from its
    predecessor, stopping once ``char_max_len`` collapsed entries exist.

    :param phones: iterable of phone ids
    :param char_max_len: maximum number of collapsed entries to emit
    :return: (chars, phones_index) -- the collapsed sequence and the number
        of input phones consumed.

    NOTE(review): assumes ``phones_index`` counts every consumed phone
    (incremented each loop iteration, not only on appends) -- confirm
    against callers.
    """
    ini = -1          # previous phone seen (-1 = sentinel, nothing seen yet)
    chars = []
    phones_index = 0
    for phone in phones:
        if phone != ini:
            chars.append(phone)
            ini = phone
        phones_index += 1
        if len(chars) == char_max_len:
            break
    return chars, phones_index
def is_covered(read, position):
    """Returns true if position is covered by read, otherwise false."""
    start, length = read[0], read[1]
    return start <= position < start + length
def hex_to_decimal(number):
    """
    Calculates the decimal of the given hex number.
    :param number: hex number in string or integer format
    :return integer of the equivalent decimal number

    Bug fixed: lowercase hex digits (a-f) previously raised KeyError
    because the lookup table only held uppercase keys; digits are now
    upper-cased before the lookup.
    """
    decimal_equivalents = {'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15}
    total = 0
    # Process least-significant digit first so position i carries 16**i.
    for i, digit in enumerate(str(number)[::-1]):
        digit = digit.upper()
        if digit in decimal_equivalents:
            total += decimal_equivalents[digit] * (16 ** i)
        else:
            total += int(digit) * (16 ** i)
    return total
def find_next_biggest_with_same_1s(n):
    """Finds the next biggest number with the same number of 1 bits.

    Strategy:
      - flip the rightmost non-trailing 0 (just left of the rightmost
        block of 1s), which increases the value;
      - clear every bit below that pivot;
      - set the lowest (ones - 1) bits, restoring the original popcount
        with the smallest possible increase.

    Example:
        xxxx_0_111_0000 --> xxxx_1_111_0000 --> xxxx_1_000_0011

    Args:
        n: A positive integer.
    Raises:
        ValueError on non-positive input.
    Returns:
        Next biggest number with same number of 1s.
    """
    if n <= 0:
        raise ValueError('Input argument has to be positive.')
    # Count the trailing 0s, then the block of 1s just above them.
    shifted = n
    zeros = 0
    while shifted & 1 == 0:
        shifted >>= 1
        zeros += 1
    ones = 0
    while shifted & 1 == 1:
        shifted >>= 1
        ones += 1
    pivot = ones + zeros
    result = n ^ (1 << pivot)          # flip the non-trailing 0 to 1
    result &= ~((1 << pivot) - 1)      # clear all bits below the pivot
    result |= (1 << (ones - 1)) - 1    # set the (ones - 1) lowest bits
    return result
def has_length(dataset):
    """
    Checks if the dataset implements __len__() and it doesn't raise an error
    """
    try:
        length = len(dataset)
    except TypeError:
        # TypeError: len() of unsized object
        return False
    return length is not None
def integer_kth_root(n: int, k: int) -> int:
    """
    Find the integer k-th root of n.
    Credits:
        http://stackoverflow.com/questions/15978781/how-to-find-integer-nth-roots
    Solution based on Newton's method.
    :param k: The root exponent.
    :param n: The number to be rooted.
    :return: The greatest integer less than or equal to the k-th root of n.
    """
    current, previous = n, n + 1
    # Newton iteration in integer arithmetic; terminates when the
    # estimate stops decreasing.
    while current < previous:
        previous = current
        estimate = (k - 1) * previous + n // pow(previous, k - 1)
        current = estimate // k
    return previous
def safe_value_fallback2(dict1: dict, dict2: dict, key1: str, key2: str, default_value=None):
    """
    Search a value in dict1, return this if it's not None.
    Fall back to dict2 - return key2 from dict2 if it's not None.
    Else falls back to None.
    """
    for source, key in ((dict1, key1), (dict2, key2)):
        value = source.get(key)
        if value is not None:
            return value
    return default_value
def mod_inv(a, p):
    """Return x with x * a == 1 (mod p) for integers a, p.
    p >= 1 must hold.
    Raise ZerodivsionError if inversion is not possible
    """
    assert p >= 1
    # Extended Euclidean algorithm tracking only the x-coefficients.
    r_prev, r_curr = p, a % p
    x_prev, x_curr = 0, 1
    while r_curr > 0:
        quotient, remainder = divmod(r_prev, r_curr)
        r_prev, r_curr = r_curr, remainder
        x_prev, x_curr = x_curr, x_prev - quotient * x_curr
    if r_prev != 1:
        # gcd(a, p) != 1: no inverse exists.
        raise ZeroDivisionError("Cannot invert %d mod %d" % (a, p))
    return x_prev % p
def _adjust_files(xs, adjust_fn):
"""Walk over key/value, tuples applying adjust_fn to files.
"""
if isinstance(xs, dict):
if "path" in xs:
out = {}
out["path"] = adjust_fn(xs["path"])
for k, vs in xs.items():
if k != "path":
out[k] = _adjust_files(vs, adjust_fn)
return out
else:
out = {}
for k, vs in xs.items():
out[k] = _adjust_files(vs, adjust_fn)
return out
elif isinstance(xs, (list, tuple)):
return [_adjust_files(x, adjust_fn) for x in xs]
else:
return xs |
def affiliation(n, C):
    """Return the affiliation of n for a given
    community C
    Parameters
    ----------
    n : object
    C : list of sets
        Community structure to search in
    Returns
    -------
    aff : list of ints
        Index of affiliation of n
    Examples
    --------
    >>> C = [set(['a','b']),set(['a'])]
    >>> nx.affiliation('a',C)
    [0,1]
    >>> nx.affiliation('b',C)
    [0]
    """
    return [index for index, community in enumerate(C) if n in community]
def _GetParallelUploadTrackerFileLinesForComponents(components):
"""Return a list of the lines for use in a parallel upload tracker file.
The lines represent the given components, using the format as described in
_CreateParallelUploadTrackerFile.
Args:
components: A list of ObjectFromTracker objects that were uploaded.
Returns:
Lines describing components with their generation for outputting to the
tracker file.
"""
lines = []
for component in components:
generation = None
generation = component.generation
if not generation:
generation = ''
lines += [component.object_name, str(generation)]
return lines |
def jaccard(xs, ys):
    """
    Computes the Jaccard Similarity Coefficient
    :param xs: set containing all the words in the random paper
    :param ys: set containing all the words in the every paper
    :return: return the Jaccard index, how similar are both sets
    """
    xs, ys = set(xs), set(ys)
    intersection_size = len(xs & ys)
    union_size = len(xs | ys)
    return float(intersection_size) / union_size
def sorted_merge(a, b):
    """ 10.1 Sorted Merge: You are given two sorted arrays, A and B, where A
    has a large enough buffer at the end to hold B. Write a method to merge B
    into A in sorted order.

    Returns a new merged list; on a tie, B's element precedes A's (matching
    the original recursive implementation).

    Fixed: the recursive version sliced the list tails on every call
    (O(n^2) copying) and hit the recursion limit on long inputs; this
    iterative two-pointer merge is O(n).
    """
    if len(a) == 0 and len(b) == 0:
        return []
    if len(a) == 0:
        return b
    if len(b) == 0:
        return a
    merged = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged
def set_scrollbars_hidden(hidden: bool) -> dict:
    """
    Parameters
    ----------
    hidden: bool
            Whether scrollbars should be always hidden.
    **Experimental**
    """
    params = {"hidden": hidden}
    return {"method": "Emulation.setScrollbarsHidden", "params": params}
def str_delimited(results, header=None, delimiter="\t"):
    """
    Given a tuple of tuples, generate a delimited string form.
    >>> results = [["a","b","c"],["d","e","f"],[1,2,3]]
    >>> print(str_delimited(results,delimiter=","))
    a,b,c
    d,e,f
    1,2,3
    Args:
        result: 2d sequence of arbitrary types.
        header: optional header
    Returns:
        Aligned string output in a table-like format.
    """
    rows = [delimiter.join(str(cell) for cell in row) for row in results]
    body = "\n".join(rows)
    if header is not None:
        return delimiter.join(header) + "\n" + body
    return body
def get_dot_file_path(gname):
    """
    For a graph named gname, this method returns the path to its dot file in
    the dot_atlas directory.
    Parameters
    ----------
    gname : str
    Returns
    -------
    str
    """
    return f"dot_atlas/good_bad_trols_{gname}.dot"
def ns_id(tagname, suds_ns):
    """Adds namespace to tag"""
    namespace_uri = suds_ns[1]
    return '{{{0}}}{1}'.format(namespace_uri, tagname)
def _to_tuple(x):
"""
Converts an object into a tuple. If the object is not iterable, creates a one-element tuple containing the object.
Parameters
----------
x : object
Returns
-------
out : tuple
Tuple representing the object.
"""
try:
return tuple(x)
except TypeError:
return x, |
def build_message(attr_name, val1, val2, details=""):
    """ Build error message for is_has_traits_almost_equal and
    is_val_almost_equal to return.
    """
    template = "Different {} {}. Types: {} vs {}. Values: \n{} \nvs \n{}"
    return template.format(attr_name, details, type(val1), type(val2),
                           repr(val1), repr(val2))
def transformBigValue(value):
    """
    ===========================================================================
    Transform numerical input data by scaling with
        X/100000
    and clamping the result to at most 1.0.

    NOTE(review): the original docstring said X/100, but the code divides
    by 100000 -- the code's behavior is documented here; confirm which was
    intended.
    ===========================================================================
    **Args**:
        value: numeric input to scale.
    **Returns**:
        Scaled value, saturating at 1.0 for inputs >= 100000.
    """
    r = value/100000
    if r>1: r=1.0
    return r
def _swift_bool(bstring):
"""
Convert a SWIFT-VOevent style boolean string ('true'/'false') to a bool.
"""
if bstring == 'true':
return True
elif bstring == 'false':
return False
else:
raise ValueError("This string does not appear to be a SWIFT VOEvent "
"boolean: %s" % bstring) |
def strToBool(string):
    """ Convert `"True"` and `"False"` to their boolean counterparts. """
    normalized = string.lower()
    if normalized == "true":
        return True
    if normalized == "false":
        return False
    err_str = f"{string} is not a boolean value!"
    raise ValueError(err_str)
def updateTasks(newTasks, oldTasks):
    """
    Compare the old tasks against the new ones. This function is essential due
    to the fact that a jupyter-notebook user may rewrite a task and the latest
    version is the one that needs to be kept.
    :param newTask: new Task code
    :param tasks: existing tasks
    return: dictionary with the merging result.
    """
    # Empty newTasks means the update came from a class task, which has
    # already been applied elsewhere -- nothing to merge.
    if newTasks:
        name = next(iter(newTasks))
        redefined = name in oldTasks and (not newTasks[name] == oldTasks[name])
        if redefined:
            print(("WARNING! Task " + name + " has been redefined (the previous will be deprecated)."))
        oldTasks[name] = newTasks[name]
    return oldTasks
def boto_all(func, *args, **kwargs):
    """
    Iterate through all boto next_token's

    Calls ``func`` repeatedly, collecting every list-typed value from each
    response page, until a response carries no 'NextToken'.

    NOTE(review): the continuation token is taken from the *last collected
    item* (``ret[-1].NextToken``) rather than the response itself
    (``resp['NextToken']``), and this raises IndexError if a page with a
    NextToken yields no list items -- confirm this is the intended
    pagination contract before changing it.
    """
    resp = {}
    ret = []
    while True:
        resp = func(*args, **kwargs)
        # Accumulate every list value from the response page.
        for val in resp.values():
            if type(val) is list:
                ret.extend(val)
        if not resp.get('NextToken', None):
            break
        kwargs['NextToken'] = ret[-1].NextToken
    return ret
def is_iterable(an_object, include_strings=True):
    """ Tell whether an object can be iterated over.

    Lists, tuples, dicts, strings, numpy arrays, etc. all qualify. When
    include_strings is False, strings are reported as non-iterable: this
    helps callers deciding between "one object" and "many objects", since
    one rarely wants to loop over a string's characters in that situation.
    """
    if not include_strings and isinstance(an_object, str):
        return False
    try:
        iter(an_object)
    except TypeError:
        return False
    return True
def fluctuations(N_avg, N2_avg, **kwargs):
    """Variance of N, i.e. <N^2> - <N>^2, computed from the two averages."""
    mean_squared = N_avg ** 2
    return N2_avg - mean_squared
def format_list(data):
    """Join the given strings, sorted, into a single "a, b, c" string.

    :param data: a list of strings
    :rtype: str
    """
    ordered = sorted(data)
    return ', '.join(ordered)
def fibonacci(n):
    """Return (and print) the nth value of the Fibonacci series.

    Out-of-range inputs (negative, or above 999) print and return an
    explanatory message string instead of a number.
    """
    if n < 0:
        message = 'The number cannot be negative.'
        print(message)
        return message
    if n > 999:
        message = 'The number is too big. Please enter a number between 0 and 999.'
        print(message)
        return message
    if n < 2:
        # fib(0) == 0 and fib(1) == 1, so the index is its own value here.
        print(n)
        return n
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    print(current)
    return current
def get_objects_name(objects):
    """ Retrieves the names of objects.

    Names whose 5th- and 4th-from-last characters are "}." have their
    final four characters stripped; all other names pass through as-is.

    Parameters:
        objects (list): Objects with a ``name`` attribute.
    Returns:
        list: Object names.
    """
    return [
        obj.name[:-4] if obj.name[-5:-3] == '}.' else obj.name
        for obj in objects
    ]
def get_info(obj):
    """
    Build a plain dict of account info from an account object.

    :type obj: account object
    :param obj: the object of account (may be None/falsy)
    :return: dict of account info, or {} when obj is falsy
    """
    if not obj:
        return {}
    return {
        'db_instance_id': obj.dbinstance_id,
        'account_name': obj.account_name,
        'account_status': obj.account_status,
        'account_type': obj.account_type,
        'account_description': obj.account_description,
        'database_privileges': obj.database_privileges,
    }
def make_pairs(seq1, seq2, merge_list, accumulator=0):
    """ PART D: recursively zip seq1 and seq2 into merge_list.

    Each call appends the tuple (seq1[i], seq2[i]) for the current index
    and recurses with the index bumped by one; recursion stops once the
    shorter sequence is exhausted. merge_list is passed in (and mutated)
    because lists are mutable; it is also returned. """
    shorter_len = min(len(seq1), len(seq2))
    if accumulator == shorter_len:
        # base case: the shorter sequence has been fully consumed
        return merge_list
    merge_list.append((seq1[accumulator], seq2[accumulator]))
    return make_pairs(seq1, seq2, merge_list, accumulator + 1)
def flatten_list(lst):
    """Function flattening list of lists.

    Arguments:
        lst - list (of lists, arbitrarily nested).
    Returns:
        flattened - flattened list."""
    result = []
    for element in lst:
        if isinstance(element, list):
            result.extend(flatten_list(element))
        else:
            result.append(element)
    return result
def maybe(pattern):
    """ Wrap an `re` pattern so it matches zero or one occurrence.

    :param pattern: an `re` pattern
    :type pattern: str
    :rtype: str
    """
    return r'(?:' + pattern + r')?'
def rshift(integer: int, shift: int) -> int:
    """Logical (zero-fill) right shift, treating negatives as 32-bit values."""
    value = integer if integer >= 0 else integer + 0x100000000
    return value >> shift
def logic(index, skip):
    """ Return True when index is an exact multiple of skip. """
    return index % skip == 0
def _find_one(target, name_or_provider):
"""Returns a list with the single given provider for a target.
This function supports legacy providers (referenced by name) and modern
providers (referenced by their provider object).
Args:
target: A target or list of targets whose providers should be
searched. This argument may also safely be None, which causes the empty
list to be returned.
name_or_provider: The string name of the legacy provider or the reference
to a modern provider to return.
Returns:
A list of providers from the given targets. This list may have fewer
elements than the given number of targets (including being empty) if not all
targets propagate the provider.
"""
if not target:
return None
# If name_or_provider is a string, find it as a legacy provider.
if type(name_or_provider) == type(""):
return getattr(target, name_or_provider)
# Otherwise, find it as a modern provider.
return target[name_or_provider] if name_or_provider in target else None |
def should_be_deactivated(message):
    """
    Determines whether a message stands for an option to be turned off.
    Args:
        message(str): A message to test
    Returns:
        bool: True if the message is negative, False otherwise
    """
    negatives = {"deactivated", "disabled", "false", "no", "none", "off"}
    normalized = message.strip().lower()
    return normalized in negatives
def transformer(text):
    """Convert a string from UpperCamelCase to lowercase_with_underscores.

    (Original docstring, translated from Romanian: "write a function that
    converts a string of characters from UpperCamelCase to
    lowercase_with_underscores".)
    """
    out = ""
    for c in text:
        if c.isupper() and out == "":
            # Leading uppercase: lowercase it, no underscore prefix.
            out += c.lower()
        elif c.isupper() and not out[-1].isalnum():
            # Bug fix: the original tested the bound method `c.isupper`
            # (always truthy) instead of calling it. After a non-alphanumeric
            # character an uppercase letter is lowered without an underscore.
            out += c.lower()
        elif c.isupper():
            # Uppercase mid-word: insert a separating underscore.
            out += "_" + c.lower()
        else:
            out += c
    return out
def complement(base):
    """ Complement nucleotide (A<->T, G<->C); others pass through unchanged. """
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return pairs.get(base, base)
def nocomment(astr, com='!'):
    """
    just like the comment in python:
    on each line, drop everything from the first occurrence of 'com' onwards.
    """
    # str.partition returns the whole line unchanged when 'com' is absent.
    return '\n'.join(line.partition(com)[0] for line in astr.splitlines())
def walk_dict(d, path):
    """Walks a dict given a path of keys.
    For example, if we have a dict like this::
        d = {
            'a': {
                'B': {
                    1: ['hello', 'world'],
                    2: ['hello', 'again'],
                }
            }
        }
    Then ``walk_dict(d, ['a', 'B', 1])`` would return
    ``['hello', 'world']``.
    """
    node = d
    for key in path:
        node = node[key]
    return node
def fmttimeshort(n):
    """Format a duration in seconds as a short human-readable string.

    Returns 'complete!' for 0, '<unknown>' for values that cannot be
    converted to int or fall outside [0, 60 days), '-' for a week or more,
    'Dd HH:MM:SS' when at least one full day, and 'HH:MM:SS' otherwise.
    """
    if n == 0:
        return 'complete!'
    # Validate explicitly: the original used `assert` inside a bare
    # `except:` — asserts vanish under -O and a bare except hides bugs.
    try:
        n = int(n)
    except (TypeError, ValueError):
        return '<unknown>'
    if not 0 <= n < 5184000:  # 60 days
        return '<unknown>'
    m, s = divmod(n, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    if d >= 7:
        return '-'
    if d > 0:
        return '%dd %02d:%02d:%02d' % (d, h, m, s)
    return '%02d:%02d:%02d' % (h, m, s)
def retrieve_time_from_location(location):
    """Sort-key helper: extract the 'time' entry of a location dict."""
    return location['time']
def cpu_profile(func, repetitions=1):
    """Run `func` up to `repetitions` times under cProfile and print stats.

    Stops early as soon as func() returns a falsy value. After the runs,
    prints the top 20 entries sorted by internal time and returns the
    last value func() produced (True if func was never called).
    """
    import cProfile
    import pstats
    profiler = cProfile.Profile()
    profiler.enable()
    outcome = True
    for _ in range(repetitions):
        outcome = func()
        if not outcome:
            break
    profiler.disable()
    # Report after the program section ends.
    stats = pstats.Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('time').print_stats(20)
    return outcome
def format_int(n: int) -> bytes:
    """Format an integer using a variable-length binary encoding.

    Values below 128 occupy a single byte. Larger values are split into
    7-bit groups, emitted most-significant first, with the high bit set on
    every byte except the last to signal that more digits follow.
    """
    if n < 128:
        return bytes([n])
    # Collect 7-bit groups, least-significant first, then flip to MSB-first.
    groups = []
    while n > 0:
        groups.append(n & 0x7F)
        n >>= 7
    groups.reverse()
    return bytes([g | 0x80 for g in groups[:-1]] + groups[-1:])
def rhombus_area(diagonal_1, diagonal_2):
    """Returns the area of a rhombus: half the product of its diagonals."""
    return diagonal_1 * diagonal_2 / 2
def break_str(prereqs):
    """
    Join the given items into a backslash-continued multiline string
    (Makefile-style line continuations with tab indentation).
    """
    continuation = ' \\\n\t'
    return continuation.join(prereqs)
def get_set_vertices(g_or_n):
    """Accept either a graph or the vertex count n itself.

    Returns the pair (V, n) where V is the list of vertices [1, 2, ..., n];
    n is taken from len(g.vs) when a graph object is passed.
    """
    if isinstance(g_or_n, int):
        count = g_or_n
    else:
        count = len(g_or_n.vs)
    vertices = list(range(1, count + 1))
    return vertices, count
def y(x: float, slope: float, initial_offset: float = 0) -> float:
    """Evaluate the line slope*x + initial_offset at x (with type annotations!)."""
    return initial_offset + slope * x
def mod_test(equation, val):
    """
    Comparison for the modulo binary search.

    :equation: Equation to test (callable applied to val)
    :val: Input to the division
    :return: None when the equation yields None, 0 on an exact zero,
             1 when the result differs from val, -1 when it equals val.
    """
    result = equation(val)
    # Idiom fix: identity check for None (was `result == None`).
    if result is None:
        return None
    if result == 0:
        return 0
    # Branch structure preserved from the original: if neither comparison
    # holds (exotic __eq__/__ne__), fall through returning None implicitly.
    if result != val:
        return 1
    elif result == val:
        return -1
def docker_command(command):
    """Format command as needed to run inside your db docker container.

    :param str command: postgres command
    :return: Terminal command
    :rtype: str
    """
    # The command is executed through psql inside the `db` compose service.
    return (
        'docker-compose exec -T db psql '
        '--username postgres --dbname postgres '
        f'--command "{command}"'
    )
def _to_capitalize(word: str):
""" :Header segment to capitalize
"""
return '-'.join([i.capitalize() for i in word.split('-')]) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.